Dataset schema: one record per source file. Columns marked ⌀ are nullable.

| column | type | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 to 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 2 to 1.02M |
| avg_line_length | float64 | 1 to 417k |
| max_line_length | int64 | 1 to 987k |
| alphanum_fraction | float64 | 0 to 1 |
| content_no_comment | string | length 0 to 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
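For orientation, here is a minimal sketch of how a record with this schema might be inspected in Python. The Parquet file name and the use of pandas are assumptions made for illustration; the dataset's actual storage format and loading API are not specified above.

```python
import pandas as pd

# Assumed file name; the real storage layout of this dataset is not given here.
df = pd.read_parquet("code_files.parquet")

record = df.iloc[0]  # one row corresponds to one source file

# Repository metadata for the most-starred copy of the file.
print(record["max_stars_repo_name"], record["max_stars_repo_path"])
print("stars:", record["max_stars_count"], "licenses:", record["max_stars_repo_licenses"])

# 'content' holds the raw file text; 'content_no_comment' is the comment-stripped variant.
print(record["content"][:200])
print(record["avg_line_length"], record["max_line_length"], record["alphanum_fraction"])
```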
hexsha: 1c46098610964543336f1caf9a6c92cb98615a0c | size: 5,335 | ext: py | lang: Python
max_stars: path example/distill/nlp/reader.py | repo wangxicoding/edl | head 75d651e72e5297aba2e597588cf958ea336deb4e | licenses ["Apache-2.0"] | count 90 | events 2020-04-21T01:46:10.000Z to 2022-02-10T09:09:34.000Z
max_issues: path example/distill/nlp/reader.py | repo wangxicoding/edl | head 75d651e72e5297aba2e597588cf958ea336deb4e | licenses ["Apache-2.0"] | count 37 | events 2018-03-02T22:41:15.000Z to 2020-04-22T16:48:36.000Z
max_forks: path example/distill/nlp/reader.py | repo wangxicoding/edl | head 75d651e72e5297aba2e597588cf958ea336deb4e | licenses ["Apache-2.0"] | count 34 | events 2018-03-02T23:28:25.000Z to 2020-03-25T08:50:29.000Z
content:
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import os
import csv
import sys
from paddlehub.dataset import InputExample
from paddlehub.common.dir import DATA_HOME
from paddlehub.dataset.base_nlp_dataset import BaseNLPDataset
import paddle as P
import paddle.fluid.dygraph as D
import numpy as np
def space_tokenizer(i):
return i.split()
def pad_batch_data(data, dtype, pad_idx=0, max_len=-1):
if max_len <= 0:
for s in data:
if len(s) > max_len:
max_len = len(s)
inst_data = np.array([
list(inst) + list([pad_idx] * (max_len - len(inst))) for inst in data
])
return np.array(inst_data).astype(dtype)
class ChnSentiCorp(BaseNLPDataset):
def __init__(self):
base_path = "./data/"
super(ChnSentiCorp, self).__init__(
base_path=base_path,
train_file="train.part.0",
dev_file="dev.part.0",
test_file="test.part.0",
label_file=None,
label_list=["0", "1"], )
self._word_dict = None
def __read_file(self, input_file):
"""
data file format:
origin sentence\tword segment sentence\tlabel
"""
with codecs.open(input_file, "r", encoding="UTF-8") as f:
for line in f:
line = line.strip()
if len(line) <= 0:
continue
arr = line.split("\t")
#print("line:", len(arr))
yield arr
def _read_file(self, input_file, phase=None):
"""
[(seq_id,label,origin sentence)]
"""
seq_id = 0
examples = []
for t in self.__read_file(input_file):
if len(t) == 2:
#example = InputExample(
# guid=seq_id, label=t[1], text_a=t[0])
#print("t2", t[1])
assert len(t) != 2, "data format error:" + t
elif len(t) == 3:
example = InputExample(guid=seq_id, label=t[2], text_a=t[0])
#print("t3", t[2])
else:
assert False, 'invalid format'
seq_id += 1
examples.append(example)
return examples
def student_word_dict(self, vocab_file):
"""
{
word->word_idx
}
"""
with codecs.open(vocab_file, "r", encoding="UTF-8") as f:
self._word_dict = {
i.strip(): l
for l, i in enumerate(f.readlines())
}
return self._word_dict
def student_reader(self, input_file, word_dict):
"""
return [([segment_sentence_idxs], label, sentence), ()...]
"""
def reader():
input_files = []
if isinstance(input_file, str):
input_files.append(input_file)
else:
input_files = input_file
assert isinstance(input_file, list)
for data_file in input_files:
print("open file:", data_file)
for t in self.__read_file(data_file):
s = []
for word in space_tokenizer(t[1]):
idx = word_dict[
word] if word in word_dict else word_dict['[UNK]']
s.append(idx)
yield s, t[2], t[0]
return reader
def batch_reader(self, input_file, word_dict, batch_size, shuffle=True):
def reader():
if shuffle:
s_reader = P.reader.shuffle(
self.student_reader(input_file, word_dict),
buf_size=100000)
else:
s_reader = self.student_reader(input_file, word_dict)
b = [[], [], []]
for rec in s_reader():
if len(b[0]) == batch_size:
yield b
b = [[], [], []]
continue
for i in range(len(rec)):
b[i].append(rec[i])
if len(b[0]) > 0:
yield b
return reader
def pad_batch_reader(self, input_file, word_dict, batch_size,
shuffle=True):
def reader():
b_reader = self.batch_reader(
input_file, word_dict, batch_size, shuffle=shuffle)
for b in b_reader():
b[0] = D.base.to_variable(pad_batch_data(b[0], 'int64'))
b[1] = D.base.to_variable(np.array(b[1]).astype('int64'))
yield b
return reader
if __name__ == '__main__':
ds = ChnSentiCorp()
ds._read_file("./data/train.part.0")
ds.student_reader("./data/train.part.0", "./data/vocab.bow.txt")
avg_line_length: 30.485714 | max_line_length: 78 | alphanum_fraction: 0.530834
content_no_comment:
import codecs
import os
import csv
import sys
from paddlehub.dataset import InputExample
from paddlehub.common.dir import DATA_HOME
from paddlehub.dataset.base_nlp_dataset import BaseNLPDataset
import paddle as P
import paddle.fluid.dygraph as D
import numpy as np
def space_tokenizer(i):
return i.split()
def pad_batch_data(data, dtype, pad_idx=0, max_len=-1):
if max_len <= 0:
for s in data:
if len(s) > max_len:
max_len = len(s)
inst_data = np.array([
list(inst) + list([pad_idx] * (max_len - len(inst))) for inst in data
])
return np.array(inst_data).astype(dtype)
class ChnSentiCorp(BaseNLPDataset):
def __init__(self):
base_path = "./data/"
super(ChnSentiCorp, self).__init__(
base_path=base_path,
train_file="train.part.0",
dev_file="dev.part.0",
test_file="test.part.0",
label_file=None,
label_list=["0", "1"], )
self._word_dict = None
def __read_file(self, input_file):
with codecs.open(input_file, "r", encoding="UTF-8") as f:
for line in f:
line = line.strip()
if len(line) <= 0:
continue
arr = line.split("\t")
yield arr
def _read_file(self, input_file, phase=None):
seq_id = 0
examples = []
for t in self.__read_file(input_file):
if len(t) == 2:
assert len(t) != 2, "data format error:" + t
elif len(t) == 3:
example = InputExample(guid=seq_id, label=t[2], text_a=t[0])
else:
assert False, 'invalid format'
seq_id += 1
examples.append(example)
return examples
def student_word_dict(self, vocab_file):
with codecs.open(vocab_file, "r", encoding="UTF-8") as f:
self._word_dict = {
i.strip(): l
for l, i in enumerate(f.readlines())
}
return self._word_dict
def student_reader(self, input_file, word_dict):
def reader():
input_files = []
if isinstance(input_file, str):
input_files.append(input_file)
else:
input_files = input_file
assert isinstance(input_file, list)
for data_file in input_files:
print("open file:", data_file)
for t in self.__read_file(data_file):
s = []
for word in space_tokenizer(t[1]):
idx = word_dict[
word] if word in word_dict else word_dict['[UNK]']
s.append(idx)
yield s, t[2], t[0]
return reader
def batch_reader(self, input_file, word_dict, batch_size, shuffle=True):
def reader():
if shuffle:
s_reader = P.reader.shuffle(
self.student_reader(input_file, word_dict),
buf_size=100000)
else:
s_reader = self.student_reader(input_file, word_dict)
b = [[], [], []]
for rec in s_reader():
if len(b[0]) == batch_size:
yield b
b = [[], [], []]
continue
for i in range(len(rec)):
b[i].append(rec[i])
if len(b[0]) > 0:
yield b
return reader
def pad_batch_reader(self, input_file, word_dict, batch_size,
shuffle=True):
def reader():
b_reader = self.batch_reader(
input_file, word_dict, batch_size, shuffle=shuffle)
for b in b_reader():
b[0] = D.base.to_variable(pad_batch_data(b[0], 'int64'))
b[1] = D.base.to_variable(np.array(b[1]).astype('int64'))
yield b
return reader
if __name__ == '__main__':
ds = ChnSentiCorp()
ds._read_file("./data/train.part.0")
ds.student_reader("./data/train.part.0", "./data/vocab.bow.txt")
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c460c1837c4e7c5359fc82cd3f26054a7ebdf50 | size: 179 | ext: py | lang: Python
max_stars: path needle/engines/base.py | repo VICEMedia/needle | head c2d28ee07278f1d0bd7ace6a2cb65cfea24f2a7e | licenses ["BSD-3-Clause"] | count 144 | events 2017-04-23T08:52:52.000Z to 2022-03-15T03:40:37.000Z
max_issues: path new_pytest_needle/engines/base.py | repo Gadzillion/new_pytest_needle | head b86de146c443a8377cfab9750aff187c0cb0852d | licenses ["MIT"] | count 35 | events 2015-01-16T15:24:35.000Z to 2017-04-02T22:35:05.000Z
max_forks: path new_pytest_needle/engines/base.py | repo Gadzillion/new_pytest_needle | head b86de146c443a8377cfab9750aff187c0cb0852d | licenses ["MIT"] | count 24 | events 2017-04-23T08:52:57.000Z to 2022-02-02T11:57:21.000Z
content:
class EngineBase(object):
"""
Base class for diff engines.
"""
def assertSameFiles(self, output_file, baseline_file, threshold):
raise NotImplementedError
avg_line_length: 25.571429 | max_line_length: 69 | alphanum_fraction: 0.687151
content_no_comment:
class EngineBase(object):
def assertSameFiles(self, output_file, baseline_file, threshold):
raise NotImplementedError
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c460cfe2369acdf089542529e5400b016579622 | size: 4,298 | ext: py | lang: Python
max_stars: path temboo/core/Library/LastFm/Artist/GetTopTracks.py | repo jordanemedlock/psychtruths | head 52e09033ade9608bd5143129f8a1bfac22d634dd | licenses ["Apache-2.0"] | count 7 | events 2016-03-07T02:07:21.000Z to 2022-01-21T02:22:41.000Z
max_issues: path temboo/core/Library/LastFm/Artist/GetTopTracks.py | repo jordanemedlock/psychtruths | head 52e09033ade9608bd5143129f8a1bfac22d634dd | licenses ["Apache-2.0"] | count null | events null to null
max_forks: path temboo/core/Library/LastFm/Artist/GetTopTracks.py | repo jordanemedlock/psychtruths | head 52e09033ade9608bd5143129f8a1bfac22d634dd | licenses ["Apache-2.0"] | count 8 | events 2016-06-14T06:01:11.000Z to 2020-04-22T09:21:44.000Z
content:
# -*- coding: utf-8 -*-
###############################################################################
#
# GetTopTracks
# Retrieves the top tracks by an artist on Last.fm, ordered by popularity.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetTopTracks(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetTopTracks Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetTopTracks, self).__init__(temboo_session, '/Library/LastFm/Artist/GetTopTracks')
def new_input_set(self):
return GetTopTracksInputSet()
def _make_result_set(self, result, path):
return GetTopTracksResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetTopTracksChoreographyExecution(session, exec_id, path)
class GetTopTracksInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetTopTracks
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) Your Last.fm API Key.)
"""
super(GetTopTracksInputSet, self)._set_input('APIKey', value)
def set_Artist(self, value):
"""
Set the value of the Artist input for this Choreo. ((conditional, string) The artist name. Required unless providing MbID.)
"""
super(GetTopTracksInputSet, self)._set_input('Artist', value)
def set_AutoCorrect(self, value):
"""
Set the value of the AutoCorrect input for this Choreo. ((optional, boolean) Transform misspelled artist names into correct artist names. The corrected artist name will be returned in the response. Defaults to 0.)
"""
super(GetTopTracksInputSet, self)._set_input('AutoCorrect', value)
def set_Limit(self, value):
"""
Set the value of the Limit input for this Choreo. ((optional, integer) The number of results to fetch per page. Defaults to 50.)
"""
super(GetTopTracksInputSet, self)._set_input('Limit', value)
def set_MbID(self, value):
"""
Set the value of the MbID input for this Choreo. ((conditional, string) The musicbrainz id for the artist. Required unless providing Artist.)
"""
super(GetTopTracksInputSet, self)._set_input('MbID', value)
def set_Page(self, value):
"""
Set the value of the Page input for this Choreo. ((optional, integer) The page number to fetch. Defaults to 1.)
"""
super(GetTopTracksInputSet, self)._set_input('Page', value)
class GetTopTracksResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetTopTracks Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from Last.fm.)
"""
return self._output.get('Response', None)
class GetTopTracksChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetTopTracksResultSet(response, path)
avg_line_length: 39.796296 | max_line_length: 221 | alphanum_fraction: 0.676826
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c460e435bc0e519d5da56e295c2516fae50f58a | size: 2,381 | ext: py | lang: Python
max_stars: path pgmpy/exceptions/Exceptions.py | repo NunoEdgarGFlowHub/pgmpy | head ac0ecc8f5bdd14999c386c6b00a3ce77407b83ce | licenses ["MIT"] | count 1 | events 2016-08-27T18:30:57.000Z to 2016-08-27T18:30:57.000Z
max_issues: path pgmpy/exceptions/Exceptions.py | repo NunoEdgarGFlowHub/pgmpy | head ac0ecc8f5bdd14999c386c6b00a3ce77407b83ce | licenses ["MIT"] | count null | events null to null
max_forks: path pgmpy/exceptions/Exceptions.py | repo NunoEdgarGFlowHub/pgmpy | head ac0ecc8f5bdd14999c386c6b00a3ce77407b83ce | licenses ["MIT"] | count 1 | events 2016-08-27T18:31:00.000Z to 2016-08-27T18:31:00.000Z
content:
#!/usr/bin/env python3
"""Contains all the user-defined exceptions created for PgmPy"""
class MissingParentsError(Exception):
def __init__(self, *missing):
self.missing = missing
def __str__(self):
return repr("Parents are missing: " + str(self.missing))
class ExtraParentsError(Exception):
def __init__(self, *extra):
self.extra = extra
def __str__(self):
return repr("Following are not parents: " + str(self.extra))
class MissingStatesError(Exception):
def __init__(self, *missing):
self.missing = missing
def __str__(self):
return repr("States are missing: " + str(self.missing))
class ExtraStatesError(Exception):
def __init__(self, *extra):
self.extra = extra
def __str__(self):
return repr("Following are not states: " + str(self.extra))
class SelfLoopError(Exception):
def __init__(self, *extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class CycleError(Exception):
def __init__(self, *extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class StateError(Exception):
def __init__(self, *extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class NodeNotFoundError(Exception):
def __init__(self, *extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class ScopeError(Exception):
def __init__(self, extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class SizeError(Exception):
def __init__(self, extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class CardinalityError(Exception):
def __init__(self, extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class RequiredError(Exception):
def __init__(self, extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class ModelError(Exception):
def __init__(self, extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class InvalidValueError(Exception):
def __init__(self, extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
avg_line_length: 20.704348 | max_line_length: 68 | alphanum_fraction: 0.642167
content_no_comment:
class MissingParentsError(Exception):
def __init__(self, *missing):
self.missing = missing
def __str__(self):
return repr("Parents are missing: " + str(self.missing))
class ExtraParentsError(Exception):
def __init__(self, *extra):
self.extra = extra
def __str__(self):
return repr("Following are not parents: " + str(self.extra))
class MissingStatesError(Exception):
def __init__(self, *missing):
self.missing = missing
def __str__(self):
return repr("States are missing: " + str(self.missing))
class ExtraStatesError(Exception):
def __init__(self, *extra):
self.extra = extra
def __str__(self):
return repr("Following are not states: " + str(self.extra))
class SelfLoopError(Exception):
def __init__(self, *extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class CycleError(Exception):
def __init__(self, *extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class StateError(Exception):
def __init__(self, *extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class NodeNotFoundError(Exception):
def __init__(self, *extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class ScopeError(Exception):
def __init__(self, extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class SizeError(Exception):
def __init__(self, extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class CardinalityError(Exception):
def __init__(self, extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class RequiredError(Exception):
def __init__(self, extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class ModelError(Exception):
def __init__(self, extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class InvalidValueError(Exception):
def __init__(self, extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c460e9948c0b105e16e3c6be296155958f589a9 | size: 2,555 | ext: py | lang: Python
max_stars: path only_common.py | repo taotaotao3/only_common | head 7dd3700d4bf3935c193b0b6f38a0dafa750ad01c | licenses ["MIT"] | count null | events null to null
max_issues: path only_common.py | repo taotaotao3/only_common | head 7dd3700d4bf3935c193b0b6f38a0dafa750ad01c | licenses ["MIT"] | count null | events null to null
max_forks: path only_common.py | repo taotaotao3/only_common | head 7dd3700d4bf3935c193b0b6f38a0dafa750ad01c | licenses ["MIT"] | count null | events null to null
content:
import sys
import io
import csv
import pprint
import pandas as pd
import pdb
def excommon(arg_1 = 'a.csv', arg_2 = 'b.csv', arg_3 = 'shift-jis'):
print('sys.argv[1]:', arg_1)
print('sys.argv[2]:', arg_2)
print('sys.argv[3]:', arg_3)
df_a = pd.read_csv(arg_1, encoding=arg_3, header=None)
list_a = []
list_a = list(df_a.loc[0][0])
df_b = pd.read_csv(arg_2, encoding=arg_3, header=None)
list_b = []
list_b = list(df_b.loc[0][0])
after_content = ""
after_content2 = ""
flag_last = "0"
def duplicate_delete_csv(content, content2, after_content, after_content2, flag_last):
after_content = content
after_content2 = content2
for i in range(len(content)):
if i > int(len(content2)-1):
after_content = content[:i]
flag_last = "1"
return after_content, after_content2, flag_last
if len(content) - 1 == i and content[i] == content2[i]:
flag_last = "1"
content2 = content
after_content2 = content2
after_content = content
return after_content, after_content2, flag_last
if len(content2) - 1 == i and content[i] == content2[i]:
flag_last = "1"
content = content2
after_content = content
after_content2 = content2
return after_content, after_content2, flag_last
if content[i] != content2[i]:
for num in range(len(content) - i):
if content2[i] == content[i+num]:
after_content = content[:i] + content[(i+num):]
if i == len(content2) - 1:
flag_last = "1"
after_content = content2[:i+1]
after_content2 = content2[:i+1]
return after_content, after_content2, flag_last
after_content2 = content2[:i] + content2[i+1:]
if i == len(content2) - 1:
flag_last = "1"
after_content = content2[:i]
after_content2 = content2[:i]
return after_content, after_content2, flag_last
while list_a != list_b:
list_a, list_b, flag_last = duplicate_delete_csv(list_a, list_b, after_content, after_content2, flag_last)
if flag_last == "1":
break
StrA = "".join(list_a)
print('Only common parts:', StrA)
sys.exit
avg_line_length: 37.573529 | max_line_length: 115 | alphanum_fraction: 0.535812
content_no_comment:
import sys
import io
import csv
import pprint
import pandas as pd
import pdb
def excommon(arg_1 = 'a.csv', arg_2 = 'b.csv', arg_3 = 'shift-jis'):
print('sys.argv[1]:', arg_1)
print('sys.argv[2]:', arg_2)
print('sys.argv[3]:', arg_3)
df_a = pd.read_csv(arg_1, encoding=arg_3, header=None)
list_a = []
list_a = list(df_a.loc[0][0])
df_b = pd.read_csv(arg_2, encoding=arg_3, header=None)
list_b = []
list_b = list(df_b.loc[0][0])
after_content = ""
after_content2 = ""
flag_last = "0"
def duplicate_delete_csv(content, content2, after_content, after_content2, flag_last):
after_content = content
after_content2 = content2
for i in range(len(content)):
if i > int(len(content2)-1):
after_content = content[:i]
flag_last = "1"
return after_content, after_content2, flag_last
if len(content) - 1 == i and content[i] == content2[i]:
flag_last = "1"
content2 = content
after_content2 = content2
after_content = content
return after_content, after_content2, flag_last
if len(content2) - 1 == i and content[i] == content2[i]:
flag_last = "1"
content = content2
after_content = content
after_content2 = content2
return after_content, after_content2, flag_last
if content[i] != content2[i]:
for num in range(len(content) - i):
if content2[i] == content[i+num]:
after_content = content[:i] + content[(i+num):]
if i == len(content2) - 1:
flag_last = "1"
after_content = content2[:i+1]
after_content2 = content2[:i+1]
return after_content, after_content2, flag_last
after_content2 = content2[:i] + content2[i+1:]
if i == len(content2) - 1:
flag_last = "1"
after_content = content2[:i]
after_content2 = content2[:i]
return after_content, after_content2, flag_last
while list_a != list_b:
list_a, list_b, flag_last = duplicate_delete_csv(list_a, list_b, after_content, after_content2, flag_last)
if flag_last == "1":
break
StrA = "".join(list_a)
print('Only common parts:', StrA)
sys.exit
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c460f108d2d697a791df8a9c61f73dfc9837a9b | size: 2,840 | ext: py | lang: Python
max_stars: path test/functional/test_framework/address.py | repo IDC-Group/VHKD | head 0256ddf1477439ebc84e97132d3673aa61c39b73 | licenses ["MIT"] | count 3 | events 2018-06-23T10:04:45.000Z to 2018-06-25T02:22:01.000Z
max_issues: path test/functional/test_framework/address.py | repo IDC-Group/VHKD | head 0256ddf1477439ebc84e97132d3673aa61c39b73 | licenses ["MIT"] | count null | events null to null
max_forks: path test/functional/test_framework/address.py | repo IDC-Group/VHKD | head 0256ddf1477439ebc84e97132d3673aa61c39b73 | licenses ["MIT"] | count null | events null to null
content:
#!/usr/bin/env python3
# Copyright (c) 2016 The vhkdCoin Core vhkd
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Encode and decode BASE58, P2PKH and P2SH addresses."""
from .script import hash256, hash160, sha256, CScript, OP_0
from .util import bytes_to_hex_str, hex_str_to_bytes
from . import segwit_addr
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def byte_to_base58(b, version):
result = ''
str = bytes_to_hex_str(b)
str = bytes_to_hex_str(chr(version).encode('latin-1')) + str
checksum = bytes_to_hex_str(hash256(hex_str_to_bytes(str)))
str += checksum[:8]
value = int('0x'+str,0)
while value > 0:
result = chars[value % 58] + result
value //= 58
while (str[:2] == '00'):
result = chars[0] + result
str = str[2:]
return result
# TODO: def base58_decode
def keyhash_to_p2pkh(hash, main = False):
assert (len(hash) == 20)
version = 0 if main else 111
return byte_to_base58(hash, version)
def scripthash_to_p2sh(hash, main = False):
assert (len(hash) == 20)
version = 5 if main else 196
return byte_to_base58(hash, version)
def key_to_p2pkh(key, main = False):
key = check_key(key)
return keyhash_to_p2pkh(hash160(key), main)
def script_to_p2sh(script, main = False):
script = check_script(script)
return scripthash_to_p2sh(hash160(script), main)
def key_to_p2sh_p2wpkh(key, main = False):
key = check_key(key)
p2shscript = CScript([OP_0, hash160(key)])
return script_to_p2sh(p2shscript, main)
def program_to_witness(version, program, main = False):
if (type(program) is str):
program = hex_str_to_bytes(program)
assert 0 <= version <= 16
assert 2 <= len(program) <= 40
assert version > 0 or len(program) in [20, 32]
return segwit_addr.encode("bc" if main else "bcrt", version, program)
def script_to_p2wsh(script, main = False):
script = check_script(script)
return program_to_witness(0, sha256(script), main)
def key_to_p2wpkh(key, main = False):
key = check_key(key)
return program_to_witness(0, hash160(key), main)
def script_to_p2sh_p2wsh(script, main = False):
script = check_script(script)
p2shscript = CScript([OP_0, sha256(script)])
return script_to_p2sh(p2shscript, main)
def check_key(key):
if (type(key) is str):
key = hex_str_to_bytes(key) # Assuming this is hex string
if (type(key) is bytes and (len(key) == 33 or len(key) == 65)):
return key
assert(False)
def check_script(script):
if (type(script) is str):
script = hex_str_to_bytes(script) # Assuming this is hex string
if (type(script) is bytes or type(script) is CScript):
return script
assert(False)
avg_line_length: 32.272727 | max_line_length: 73 | alphanum_fraction: 0.68662
content_no_comment:
from .script import hash256, hash160, sha256, CScript, OP_0
from .util import bytes_to_hex_str, hex_str_to_bytes
from . import segwit_addr
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def byte_to_base58(b, version):
result = ''
str = bytes_to_hex_str(b)
str = bytes_to_hex_str(chr(version).encode('latin-1')) + str
checksum = bytes_to_hex_str(hash256(hex_str_to_bytes(str)))
str += checksum[:8]
value = int('0x'+str,0)
while value > 0:
result = chars[value % 58] + result
value //= 58
while (str[:2] == '00'):
result = chars[0] + result
str = str[2:]
return result
def keyhash_to_p2pkh(hash, main = False):
assert (len(hash) == 20)
version = 0 if main else 111
return byte_to_base58(hash, version)
def scripthash_to_p2sh(hash, main = False):
assert (len(hash) == 20)
version = 5 if main else 196
return byte_to_base58(hash, version)
def key_to_p2pkh(key, main = False):
key = check_key(key)
return keyhash_to_p2pkh(hash160(key), main)
def script_to_p2sh(script, main = False):
script = check_script(script)
return scripthash_to_p2sh(hash160(script), main)
def key_to_p2sh_p2wpkh(key, main = False):
key = check_key(key)
p2shscript = CScript([OP_0, hash160(key)])
return script_to_p2sh(p2shscript, main)
def program_to_witness(version, program, main = False):
if (type(program) is str):
program = hex_str_to_bytes(program)
assert 0 <= version <= 16
assert 2 <= len(program) <= 40
assert version > 0 or len(program) in [20, 32]
return segwit_addr.encode("bc" if main else "bcrt", version, program)
def script_to_p2wsh(script, main = False):
script = check_script(script)
return program_to_witness(0, sha256(script), main)
def key_to_p2wpkh(key, main = False):
key = check_key(key)
return program_to_witness(0, hash160(key), main)
def script_to_p2sh_p2wsh(script, main = False):
script = check_script(script)
p2shscript = CScript([OP_0, sha256(script)])
return script_to_p2sh(p2shscript, main)
def check_key(key):
if (type(key) is str):
key = hex_str_to_bytes(key)
if (type(key) is bytes and (len(key) == 33 or len(key) == 65)):
return key
assert(False)
def check_script(script):
if (type(script) is str):
script = hex_str_to_bytes(script)
if (type(script) is bytes or type(script) is CScript):
return script
assert(False)
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c460f4074ead61f00745adb8067544b72ddcdf8 | size: 7,593 | ext: py | lang: Python
max_stars: path tensor2tensor/rl/envs/simulated_batch_env.py | repo akshitj1/tensor2tensor | head a76b0f0afe24c966e26d0112356eb66f5a8a37aa | licenses ["Apache-2.0"] | count 1 | events 2022-03-25T03:07:28.000Z to 2022-03-25T03:07:28.000Z
max_issues: path tensor2tensor/rl/envs/simulated_batch_env.py | repo akshitj1/tensor2tensor | head a76b0f0afe24c966e26d0112356eb66f5a8a37aa | licenses ["Apache-2.0"] | count 1 | events 2022-01-05T06:08:00.000Z to 2022-01-05T06:08:29.000Z
max_forks: path tensor2tensor/rl/envs/simulated_batch_env.py | repo akshitj1/tensor2tensor | head a76b0f0afe24c966e26d0112356eb66f5a8a37aa | licenses ["Apache-2.0"] | count 1 | events 2021-07-15T07:25:08.000Z to 2021-07-15T07:25:08.000Z
content:
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Batch of environments inside the TensorFlow graph."""
# The code was based on Danijar Hafner's code from tf.agents:
# https://github.com/tensorflow/agents/blob/master/agents/tools/in_graph_batch_env.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_layers
from tensor2tensor.rl.envs import in_graph_batch_env
from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_lib
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
class HistoryBuffer(object):
"""History Buffer."""
def __init__(self, input_dataset, length):
self.input_data_iterator = (
input_dataset.batch(length).make_one_shot_iterator())
self.length = length
initial_frames = self.get_initial_observations()
initial_shape = [length] + common_layers.shape_list(initial_frames)[1:]
self._history_buff = tf.Variable(tf.zeros(initial_shape, tf.float32),
trainable=False)
def get_initial_observations(self):
return tf.cast(self.input_data_iterator.get_next(), tf.float32)
def get_all_elements(self):
return self._history_buff.read_value()
def move_by_one_element(self, element):
last_removed = self.get_all_elements()[:, 1:, ...]
element = tf.expand_dims(element, dim=1)
moved = tf.concat([last_removed, element], axis=1)
with tf.control_dependencies([moved]):
with tf.control_dependencies([self._history_buff.assign(moved)]):
return self._history_buff.read_value()
def reset(self, indices):
initial_frames = tf.gather(self.get_initial_observations(), indices)
scatter_op = tf.scatter_update(self._history_buff, indices, initial_frames)
with tf.control_dependencies([scatter_op]):
return self._history_buff.read_value()
def compute_uncertainty_reward(logits, predictions):
"""Uncertainty reward based on logits."""
# TODO(rsepassi): Add support for L1/L2 loss models. Current code only
# works for softmax models.
vocab_size = logits.shape[-1]
assert vocab_size > 1
log_probs = common_layers.log_prob_from_logits(logits)
max_log_probs = common_layers.index_last_dim_with_indices(log_probs,
predictions)
# Threshold
neg_log_prob = tf.nn.relu(-max_log_probs - 0.02)
# Sum across all but the batch dimension
reduce_dims = list(range(len(neg_log_prob.shape)))[1:]
summed = tf.reduce_sum(neg_log_prob, axis=reduce_dims)
return summed / 10
class SimulatedBatchEnv(in_graph_batch_env.InGraphBatchEnv):
"""Batch of environments inside the TensorFlow graph.
The batch of environments will be stepped and reset inside of the graph using
a tf.py_func(). The current batch of observations, actions, rewards, and done
flags are held in according variables.
"""
def __init__(self, environment_lambda, length, problem,
simulation_random_starts=False, intrinsic_reward_scale=0.):
"""Batch of environments inside the TensorFlow graph."""
self.length = length
self._min_reward = problem.min_reward
self._num_frames = problem.num_input_frames
self._intrinsic_reward_scale = intrinsic_reward_scale
initialization_env = environment_lambda()
hparams = trainer_lib.create_hparams(
FLAGS.hparams_set, problem_name=FLAGS.problem)
hparams.force_full_predict = True
self._model = registry.model(FLAGS.model)(
hparams, tf.estimator.ModeKeys.PREDICT)
self.action_space = initialization_env.action_space
self.action_shape = list(initialization_env.action_space.shape)
self.action_dtype = tf.int32
if simulation_random_starts:
dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, FLAGS.data_dir,
shuffle_files=True, hparams=hparams)
dataset = dataset.shuffle(buffer_size=100)
else:
dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, FLAGS.data_dir,
shuffle_files=False, hparams=hparams).take(1)
dataset = dataset.map(lambda x: x["inputs"]).repeat()
self.history_buffer = HistoryBuffer(dataset, self.length)
shape = (self.length, problem.frame_height, problem.frame_width,
problem.num_channels)
self._observ = tf.Variable(tf.zeros(shape, tf.float32), trainable=False)
def __len__(self):
"""Number of combined environments."""
return self.length
def simulate(self, action):
with tf.name_scope("environment/simulate"):
actions = tf.concat([tf.expand_dims(action, axis=1)] * self._num_frames,
axis=1)
history = self.history_buffer.get_all_elements()
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
model_output = self._model.infer(
{"inputs": history, "input_action": actions})
observ = tf.to_float(tf.squeeze(model_output["targets"], axis=1))
reward = tf.to_float(model_output["target_reward"])
reward = tf.reshape(reward, shape=(self.length,)) + self._min_reward
if self._intrinsic_reward_scale:
# Use the model's uncertainty about its prediction as an intrinsic
# reward. The uncertainty is measured by the log probability of the
# predicted pixel value.
if "targets_logits" not in model_output:
raise ValueError("The use of intrinsic rewards requires access to "
"the logits. Ensure that model.infer returns "
"'targets_logits'")
uncertainty_reward = compute_uncertainty_reward(
model_output["targets_logits"], model_output["targets"])
uncertainty_reward = tf.minimum(
1., self._intrinsic_reward_scale * uncertainty_reward)
uncertainty_reward = tf.Print(uncertainty_reward, [uncertainty_reward],
message="uncertainty_reward", first_n=1,
summarize=8)
reward += uncertainty_reward
done = tf.constant(False, tf.bool, shape=(self.length,))
with tf.control_dependencies([observ]):
with tf.control_dependencies(
[self._observ.assign(observ),
self.history_buffer.move_by_one_element(observ)]):
return tf.identity(reward), tf.identity(done)
def _reset_non_empty(self, indices):
"""Reset the batch of environments.
Args:
indices: The batch indices of the environments to reset; defaults to all.
Returns:
Batch tensor of the new observations.
"""
with tf.control_dependencies([self.history_buffer.reset(indices)]):
with tf.control_dependencies([self._observ.assign(
self.history_buffer.get_all_elements()[:, -1, ...])]):
return tf.identity(self._observ.read_value())
@property
def observ(self):
"""Access the variable holding the current observation."""
return tf.identity(self._observ)
avg_line_length: 40.388298 | max_line_length: 85 | alphanum_fraction: 0.703148
content_no_comment:
# https://github.com/tensorflow/agents/blob/master/agents/tools/in_graph_batch_env.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_layers
from tensor2tensor.rl.envs import in_graph_batch_env
from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_lib
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
class HistoryBuffer(object):
def __init__(self, input_dataset, length):
self.input_data_iterator = (
input_dataset.batch(length).make_one_shot_iterator())
self.length = length
initial_frames = self.get_initial_observations()
initial_shape = [length] + common_layers.shape_list(initial_frames)[1:]
self._history_buff = tf.Variable(tf.zeros(initial_shape, tf.float32),
trainable=False)
def get_initial_observations(self):
return tf.cast(self.input_data_iterator.get_next(), tf.float32)
def get_all_elements(self):
return self._history_buff.read_value()
def move_by_one_element(self, element):
last_removed = self.get_all_elements()[:, 1:, ...]
element = tf.expand_dims(element, dim=1)
moved = tf.concat([last_removed, element], axis=1)
with tf.control_dependencies([moved]):
with tf.control_dependencies([self._history_buff.assign(moved)]):
return self._history_buff.read_value()
def reset(self, indices):
initial_frames = tf.gather(self.get_initial_observations(), indices)
scatter_op = tf.scatter_update(self._history_buff, indices, initial_frames)
with tf.control_dependencies([scatter_op]):
return self._history_buff.read_value()
def compute_uncertainty_reward(logits, predictions):
# TODO(rsepassi): Add support for L1/L2 loss models. Current code only
# works for softmax models.
vocab_size = logits.shape[-1]
assert vocab_size > 1
log_probs = common_layers.log_prob_from_logits(logits)
max_log_probs = common_layers.index_last_dim_with_indices(log_probs,
predictions)
# Threshold
neg_log_prob = tf.nn.relu(-max_log_probs - 0.02)
# Sum across all but the batch dimension
reduce_dims = list(range(len(neg_log_prob.shape)))[1:]
summed = tf.reduce_sum(neg_log_prob, axis=reduce_dims)
return summed / 10
class SimulatedBatchEnv(in_graph_batch_env.InGraphBatchEnv):
def __init__(self, environment_lambda, length, problem,
simulation_random_starts=False, intrinsic_reward_scale=0.):
self.length = length
self._min_reward = problem.min_reward
self._num_frames = problem.num_input_frames
self._intrinsic_reward_scale = intrinsic_reward_scale
initialization_env = environment_lambda()
hparams = trainer_lib.create_hparams(
FLAGS.hparams_set, problem_name=FLAGS.problem)
hparams.force_full_predict = True
self._model = registry.model(FLAGS.model)(
hparams, tf.estimator.ModeKeys.PREDICT)
self.action_space = initialization_env.action_space
self.action_shape = list(initialization_env.action_space.shape)
self.action_dtype = tf.int32
if simulation_random_starts:
dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, FLAGS.data_dir,
shuffle_files=True, hparams=hparams)
dataset = dataset.shuffle(buffer_size=100)
else:
dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, FLAGS.data_dir,
shuffle_files=False, hparams=hparams).take(1)
dataset = dataset.map(lambda x: x["inputs"]).repeat()
self.history_buffer = HistoryBuffer(dataset, self.length)
shape = (self.length, problem.frame_height, problem.frame_width,
problem.num_channels)
self._observ = tf.Variable(tf.zeros(shape, tf.float32), trainable=False)
def __len__(self):
return self.length
def simulate(self, action):
with tf.name_scope("environment/simulate"):
actions = tf.concat([tf.expand_dims(action, axis=1)] * self._num_frames,
axis=1)
history = self.history_buffer.get_all_elements()
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
model_output = self._model.infer(
{"inputs": history, "input_action": actions})
observ = tf.to_float(tf.squeeze(model_output["targets"], axis=1))
reward = tf.to_float(model_output["target_reward"])
reward = tf.reshape(reward, shape=(self.length,)) + self._min_reward
if self._intrinsic_reward_scale:
# Use the model's uncertainty about its prediction as an intrinsic
if "targets_logits" not in model_output:
raise ValueError("The use of intrinsic rewards requires access to "
"the logits. Ensure that model.infer returns "
"'targets_logits'")
uncertainty_reward = compute_uncertainty_reward(
model_output["targets_logits"], model_output["targets"])
uncertainty_reward = tf.minimum(
1., self._intrinsic_reward_scale * uncertainty_reward)
uncertainty_reward = tf.Print(uncertainty_reward, [uncertainty_reward],
message="uncertainty_reward", first_n=1,
summarize=8)
reward += uncertainty_reward
done = tf.constant(False, tf.bool, shape=(self.length,))
with tf.control_dependencies([observ]):
with tf.control_dependencies(
[self._observ.assign(observ),
self.history_buffer.move_by_one_element(observ)]):
return tf.identity(reward), tf.identity(done)
def _reset_non_empty(self, indices):
with tf.control_dependencies([self.history_buffer.reset(indices)]):
with tf.control_dependencies([self._observ.assign(
self.history_buffer.get_all_elements()[:, -1, ...])]):
return tf.identity(self._observ.read_value())
@property
def observ(self):
return tf.identity(self._observ)
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c461034d0e13519aa62b7aed184a164629d184b | size: 4,234 | ext: py | lang: Python
max_stars: path scripts/py_featextr_server/wordembed_cosine_server.py | repo MokriyYuriy/FlexNeuART | head 49f13e3f9f0b0ea1399ea558436caaedd5233f5c | licenses ["Apache-2.0"] | count null | events null to null
max_issues: path scripts/py_featextr_server/wordembed_cosine_server.py | repo MokriyYuriy/FlexNeuART | head 49f13e3f9f0b0ea1399ea558436caaedd5233f5c | licenses ["Apache-2.0"] | count null | events null to null
max_forks: path scripts/py_featextr_server/wordembed_cosine_server.py | repo MokriyYuriy/FlexNeuART | head 49f13e3f9f0b0ea1399ea558436caaedd5233f5c | licenses ["Apache-2.0"] | count null | events null to null
content:
#!/usr/bin/env python
import sys
import argparse
sys.path.append('.')
from scripts.py_featextr_server.base_server import BaseQueryHandler, startQueryServer
import numpy as np
from scripts.py_featextr_server.utils import loadEmbeddings, createEmbedMap, robustCosineSimil
# Exclusive==True means that only one getScores
# function is executed at at time
class CosineSimilQueryHandler(BaseQueryHandler):
def __init__(self, queryEmbedFile, docEmbedFile, exclusive, debugPrint=False, useIDF=True):
super().__init__(exclusive)
self.debugPrint = debugPrint
self.useIDF = useIDF
print('Loading answer embeddings from: ' + docEmbedFile)
answWords, self.answEmbed = loadEmbeddings(docEmbedFile)
self.answEmbedMap = createEmbedMap(answWords)
if queryEmbedFile is not None:
print('Loading query embeddings from: ' + queryEmbedFile)
queryWords, self.queryEmbed = loadEmbeddings(queryEmbedFile)
self.queryEmbedMap = createEmbedMap(queryWords)
else:
self.queryEmbed = self.answEmbed
self.queryEmbedMap = self.answEmbedMap
print('Loading is done!')
def textEntryToStr(self, te):
arr = []
if self.debugPrint:
for winfo in te.entries:
arr.append('%s %g %d ' % (winfo.word, winfo.IDF, winfo.qty))
return 'docId=' + te.id + ' ' + ' '.join(arr)
def createDocEmbed(self, isQuery, textEntry):
if isQuery:
embeds = self.queryEmbed
embedMap = self.queryEmbedMap
else:
embeds = self.answEmbed
embedMap = self.answEmbedMap
zerov = np.zeros_like(embeds[0])
res = zerov
for winfo in textEntry.entries:
vectMult = winfo.qty
if self.useIDF:
vectMult *= winfo.IDF
word = winfo.word
if word in embedMap:
res += embeds[embedMap[word]] * vectMult
return res
# This function overrides the parent class
def computeScoresFromParsedOverride(self, query, docs):
if self.debugPrint:
print('getScores', query.id, self.textEntryToStr(query))
ret = {}
queryEmbed = self.createDocEmbed(True, query)
if self.debugPrint:
print(queryEmbed)
for d in docs:
if self.debugPrint:
print(self.textEntryToStr(d))
docEmbed = self.createDocEmbed(False, d)
if self.debugPrint:
print(docEmbed)
# Regular cosine deals poorly with all-zero vectors
simil = robustCosineSimil(docEmbed, queryEmbed)
# simil = (1-cosine(docEmbed, queryEmbed))
# Note that each element must be an array, b/c
# we can generate more than one feature per document!
ret[d.id] = [simil]
return ret
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Serving word-embedding models.')
parser.add_argument('--query_embed', metavar='query embeddings',
default=None, type=str,
help='Optional query embeddings file')
parser.add_argument('--doc_embed', metavar='doc embeddings',
required=True, type=str,
help='document embeddings file')
parser.add_argument('--debug_print', action='store_true',
help='Provide debug output')
parser.add_argument('--port', metavar='server port',
required=True, type=int,
help='Server port')
parser.add_argument('--host', metavar='server host',
default='127.0.0.1', type=str,
help='server host addr to bind the port')
args = parser.parse_args()
multiThreaded = True
startQueryServer(args.host, args.port, multiThreaded,
CosineSimilQueryHandler(exclusive=False,
queryEmbedFile=args.query_embed,
docEmbedFile=args.doc_embed,
debugPrint=args.debug_print))
avg_line_length: 35.579832 | max_line_length: 95 | alphanum_fraction: 0.593056
content_no_comment:
import sys
import argparse
sys.path.append('.')
from scripts.py_featextr_server.base_server import BaseQueryHandler, startQueryServer
import numpy as np
from scripts.py_featextr_server.utils import loadEmbeddings, createEmbedMap, robustCosineSimil
class CosineSimilQueryHandler(BaseQueryHandler):
def __init__(self, queryEmbedFile, docEmbedFile, exclusive, debugPrint=False, useIDF=True):
super().__init__(exclusive)
self.debugPrint = debugPrint
self.useIDF = useIDF
print('Loading answer embeddings from: ' + docEmbedFile)
answWords, self.answEmbed = loadEmbeddings(docEmbedFile)
self.answEmbedMap = createEmbedMap(answWords)
if queryEmbedFile is not None:
print('Loading query embeddings from: ' + queryEmbedFile)
queryWords, self.queryEmbed = loadEmbeddings(queryEmbedFile)
self.queryEmbedMap = createEmbedMap(queryWords)
else:
self.queryEmbed = self.answEmbed
self.queryEmbedMap = self.answEmbedMap
print('Loading is done!')
def textEntryToStr(self, te):
arr = []
if self.debugPrint:
for winfo in te.entries:
arr.append('%s %g %d ' % (winfo.word, winfo.IDF, winfo.qty))
return 'docId=' + te.id + ' ' + ' '.join(arr)
def createDocEmbed(self, isQuery, textEntry):
if isQuery:
embeds = self.queryEmbed
embedMap = self.queryEmbedMap
else:
embeds = self.answEmbed
embedMap = self.answEmbedMap
zerov = np.zeros_like(embeds[0])
res = zerov
for winfo in textEntry.entries:
vectMult = winfo.qty
if self.useIDF:
vectMult *= winfo.IDF
word = winfo.word
if word in embedMap:
res += embeds[embedMap[word]] * vectMult
return res
def computeScoresFromParsedOverride(self, query, docs):
if self.debugPrint:
print('getScores', query.id, self.textEntryToStr(query))
ret = {}
queryEmbed = self.createDocEmbed(True, query)
if self.debugPrint:
print(queryEmbed)
for d in docs:
if self.debugPrint:
print(self.textEntryToStr(d))
docEmbed = self.createDocEmbed(False, d)
if self.debugPrint:
print(docEmbed)
simil = robustCosineSimil(docEmbed, queryEmbed)
ret[d.id] = [simil]
return ret
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Serving word-embedding models.')
parser.add_argument('--query_embed', metavar='query embeddings',
default=None, type=str,
help='Optional query embeddings file')
parser.add_argument('--doc_embed', metavar='doc embeddings',
required=True, type=str,
help='document embeddings file')
parser.add_argument('--debug_print', action='store_true',
help='Provide debug output')
parser.add_argument('--port', metavar='server port',
required=True, type=int,
help='Server port')
parser.add_argument('--host', metavar='server host',
default='127.0.0.1', type=str,
help='server host addr to bind the port')
args = parser.parse_args()
multiThreaded = True
startQueryServer(args.host, args.port, multiThreaded,
CosineSimilQueryHandler(exclusive=False,
queryEmbedFile=args.query_embed,
docEmbedFile=args.doc_embed,
debugPrint=args.debug_print))
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c4610361f88087ecacad48415ecb6f130687e52 | size: 409 | ext: py | lang: Python
max_stars: path XiuxiuService/AliSDK/top/api/rest/OpenimChatlogsGetRequest.py | repo nightHearter/XiuxiuService | head 281c2d5eef85936edcd0d9ec97c8d165078f444c | licenses ["MIT"] | count null | events null to null
max_issues: path XiuxiuService/AliSDK/top/api/rest/OpenimChatlogsGetRequest.py | repo nightHearter/XiuxiuService | head 281c2d5eef85936edcd0d9ec97c8d165078f444c | licenses ["MIT"] | count null | events null to null
max_forks: path XiuxiuService/AliSDK/top/api/rest/OpenimChatlogsGetRequest.py | repo nightHearter/XiuxiuService | head 281c2d5eef85936edcd0d9ec97c8d165078f444c | licenses ["MIT"] | count null | events null to null
content:
'''
Created by auto_sdk on 2015.06.16
'''
from top.api.base import RestApi
class OpenimChatlogsGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.begin = None
self.count = None
self.end = None
self.next_key = None
self.user1 = None
self.user2 = None
def getapiname(self):
return 'taobao.openim.chatlogs.get'
avg_line_length: 24.058824 | max_line_length: 56 | alphanum_fraction: 0.696822
content_no_comment:
from top.api.base import RestApi
class OpenimChatlogsGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.begin = None
self.count = None
self.end = None
self.next_key = None
self.user1 = None
self.user2 = None
def getapiname(self):
return 'taobao.openim.chatlogs.get'
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c46117a8c4860a623124d64ceca53a37a0253a2 | size: 4,961 | ext: py | lang: Python
max_stars: path project/Code/video_stabilizer.py | repo OmerRe/video-processing-methods | head 245a89aaa1e774a62da1f043058242841a4f53ee | licenses ["MIT"] | count 1 | events 2022-03-23T13:07:28.000Z to 2022-03-23T13:07:28.000Z
max_issues: path project/Code/video_stabilizer.py | repo OmerRe/video-processing-methods | head 245a89aaa1e774a62da1f043058242841a4f53ee | licenses ["MIT"] | count null | events null to null
max_forks: path project/Code/video_stabilizer.py | repo OmerRe/video-processing-methods | head 245a89aaa1e774a62da1f043058242841a4f53ee | licenses ["MIT"] | count null | events null to null
content:
import cv2
import numpy as np
from Code.utils import fixBorder, convert_to_gray
def stabilize_video(video_frames: list, config: dict) -> list:
"""Creating a stabilized video from an arbitrary input video.
Args:
input_video: cv2.VideoCapture. Video we want to stabilize.
config: dict. Dictionary which contains useful constants.
Returns:
None, but creates stabilized video from the input video.
Details:
"""
print("Starting Video Stabilization...")
transforms = find_motion_between_frames(config['video_params'], video_frames, config)
transforms_smooth = calc_smooth_transforms(config, transforms)
stabilized_frames = apply_smooth_motion_to_frames(config['video_params'], video_frames, transforms_smooth)
print("Video Stabilization Finished")
return stabilized_frames
def find_motion_between_frames(video_params: dict, video_frames: list, config: dict) -> np.ndarray:
# Pre-define transformation-store array
transforms = np.zeros((video_params['n_frames'] - 1, 9), np.float32)
prev_frame_gray = cv2.cvtColor(video_frames[0], cv2.COLOR_BGR2GRAY)
for frame_idx, current_frame in enumerate(video_frames[1:]):
# Detecting feature points in previous frame
prev_frame_pts = []
curr_frame_pts = []
current_frame_gray = convert_to_gray(current_frame)
# Calculating optical flow and keeping only the valid features points
detector = cv2.FastFeatureDetector.create()
orb = cv2.ORB_create()
kp1 = detector.detect(prev_frame_gray, None)
kp2 = detector.detect(current_frame_gray, None)
kp1, des1 = orb.compute(prev_frame_gray, kp1)
kp2, des2 = orb.compute(current_frame_gray, kp2)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)
# img3 = cv2.drawMatches(prev_frame_gray, kp1, current_frame_gray, kp2, matches, None,
# flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
# plt.imshow(img3), plt.show()
prev_frame_pts.append(np.float32([kp1[match.queryIdx].pt for match in matches]).reshape(-1, 1, 2))
curr_frame_pts.append(np.float32([kp2[match.trainIdx].pt for match in matches]).reshape(-1, 1, 2))
prev_frame_pts = np.squeeze(np.array(prev_frame_pts))
curr_frame_pts = np.squeeze(np.array(curr_frame_pts))
transform_matrix, mask = cv2.findHomography(prev_frame_pts, curr_frame_pts, cv2.RANSAC, 5.0)
transforms[frame_idx] = transform_matrix.flatten()
print(f"Video Stabilizing: calculating transformation for frame: {frame_idx + 1} "
f"/ {video_params['n_frames'] - 1} - Tracked points: {len(prev_frame_pts)}")
prev_frame_gray = current_frame_gray
return transforms
def apply_smooth_motion_to_frames(video_params: dict, video_frames: list, transforms_smooth: np.ndarray) -> list:
stabilized_frames = [fixBorder(video_frames[0])]
# Write n_frames-1 transformed frames
for frame_idx, current_frame in enumerate(video_frames[:-1]):
print(f"Video Stabilizing: applying transformation to frame: {frame_idx + 1} "
f"/ {video_params['n_frames'] - 1}")
transform_matrix = transforms_smooth[frame_idx].reshape((3, 3))
# Apply homography wrapping to the given frame
frame_stabilized = cv2.warpPerspective(current_frame, transform_matrix, (video_params['w'], video_params['h']))
# Fix border artifacts
frame_stabilized = fixBorder(frame_stabilized)
stabilized_frames.append(frame_stabilized)
return stabilized_frames
def movingAverage(curve: np.ndarray, radius: int) -> np.ndarray:
window_size = 2 * radius + 1
# Define the filter
f = np.ones(window_size)/window_size
# Add padding to the boundaries
curve_pad = np.lib.pad(curve, (radius, radius), 'edge')
# Apply convolution
curve_smoothed = np.convolve(curve_pad, f, mode='same')
# Remove padding
curve_smoothed = curve_smoothed[radius:-radius]
# return smoothed curve
return curve_smoothed
def smooth(trajectory: np.ndarray, config: dict) -> np.ndarray:
smoothed_trajectory = np.copy(trajectory)
for i in range(smoothed_trajectory.shape[1]):
smoothed_trajectory[:, i] = movingAverage(trajectory[:, i], radius=config['SMOOTHING_RADIUS'])
return smoothed_trajectory
def calc_smooth_transforms(config: dict, transforms: np.ndarray) -> np.ndarray:
# Compute trajectory using cumulative sum of transformations
trajectory = np.cumsum(transforms, axis=0)
smoothed_trajectory = smooth(trajectory, config)
# Calculate difference between smoothed_trajectory and trajectory
difference = smoothed_trajectory - trajectory
# Calculate smooth transformation array
transforms_smooth = transforms + difference
return transforms_smooth
avg_line_length: 45.513761 | max_line_length: 119 | alphanum_fraction: 0.712961
content_no_comment:
import cv2
import numpy as np
from Code.utils import fixBorder, convert_to_gray
def stabilize_video(video_frames: list, config: dict) -> list:
print("Starting Video Stabilization...")
transforms = find_motion_between_frames(config['video_params'], video_frames, config)
transforms_smooth = calc_smooth_transforms(config, transforms)
stabilized_frames = apply_smooth_motion_to_frames(config['video_params'], video_frames, transforms_smooth)
print("Video Stabilization Finished")
return stabilized_frames
def find_motion_between_frames(video_params: dict, video_frames: list, config: dict) -> np.ndarray:
transforms = np.zeros((video_params['n_frames'] - 1, 9), np.float32)
prev_frame_gray = cv2.cvtColor(video_frames[0], cv2.COLOR_BGR2GRAY)
for frame_idx, current_frame in enumerate(video_frames[1:]):
prev_frame_pts = []
curr_frame_pts = []
current_frame_gray = convert_to_gray(current_frame)
detector = cv2.FastFeatureDetector.create()
orb = cv2.ORB_create()
kp1 = detector.detect(prev_frame_gray, None)
kp2 = detector.detect(current_frame_gray, None)
kp1, des1 = orb.compute(prev_frame_gray, kp1)
kp2, des2 = orb.compute(current_frame_gray, kp2)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)
prev_frame_pts.append(np.float32([kp1[match.queryIdx].pt for match in matches]).reshape(-1, 1, 2))
curr_frame_pts.append(np.float32([kp2[match.trainIdx].pt for match in matches]).reshape(-1, 1, 2))
prev_frame_pts = np.squeeze(np.array(prev_frame_pts))
curr_frame_pts = np.squeeze(np.array(curr_frame_pts))
transform_matrix, mask = cv2.findHomography(prev_frame_pts, curr_frame_pts, cv2.RANSAC, 5.0)
transforms[frame_idx] = transform_matrix.flatten()
print(f"Video Stabilizing: calculating transformation for frame: {frame_idx + 1} "
f"/ {video_params['n_frames'] - 1} - Tracked points: {len(prev_frame_pts)}")
prev_frame_gray = current_frame_gray
return transforms
def apply_smooth_motion_to_frames(video_params: dict, video_frames: list, transforms_smooth: np.ndarray) -> list:
stabilized_frames = [fixBorder(video_frames[0])]
for frame_idx, current_frame in enumerate(video_frames[:-1]):
print(f"Video Stabilizing: applying transformation to frame: {frame_idx + 1} "
f"/ {video_params['n_frames'] - 1}")
transform_matrix = transforms_smooth[frame_idx].reshape((3, 3))
frame_stabilized = cv2.warpPerspective(current_frame, transform_matrix, (video_params['w'], video_params['h']))
frame_stabilized = fixBorder(frame_stabilized)
stabilized_frames.append(frame_stabilized)
return stabilized_frames
def movingAverage(curve: np.ndarray, radius: int) -> np.ndarray:
window_size = 2 * radius + 1
f = np.ones(window_size)/window_size
curve_pad = np.lib.pad(curve, (radius, radius), 'edge')
curve_smoothed = np.convolve(curve_pad, f, mode='same')
curve_smoothed = curve_smoothed[radius:-radius]
return curve_smoothed
def smooth(trajectory: np.ndarray, config: dict) -> np.ndarray:
smoothed_trajectory = np.copy(trajectory)
for i in range(smoothed_trajectory.shape[1]):
smoothed_trajectory[:, i] = movingAverage(trajectory[:, i], radius=config['SMOOTHING_RADIUS'])
return smoothed_trajectory
def calc_smooth_transforms(config: dict, transforms: np.ndarray) -> np.ndarray:
trajectory = np.cumsum(transforms, axis=0)
smoothed_trajectory = smooth(trajectory, config)
difference = smoothed_trajectory - trajectory
transforms_smooth = transforms + difference
return transforms_smooth
| true
| true
|
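For readers skimming the stabilization record above: the smoothing step is just a moving average applied to the cumulative transform trajectory. The following is a minimal sketch on synthetic data, not part of the record; the 'SMOOTHING_RADIUS' key mirrors the config used above, and the array shapes and values are invented for illustration.

import numpy as np

# Minimal sketch of the trajectory-smoothing step from the record above.
# Assumes a config dict with a 'SMOOTHING_RADIUS' key, as in the original code.
config = {'SMOOTHING_RADIUS': 5}

def moving_average(curve, radius):
    window_size = 2 * radius + 1
    kernel = np.ones(window_size) / window_size
    padded = np.pad(curve, (radius, radius), 'edge')   # pad edges so the window stays full
    smoothed = np.convolve(padded, kernel, mode='same')
    return smoothed[radius:-radius]                    # drop the padding again

# Synthetic per-frame transforms: their cumulative sum is the camera trajectory.
transforms = np.random.randn(100, 2).astype(np.float32)
trajectory = np.cumsum(transforms, axis=0)
smoothed = np.column_stack([moving_average(trajectory[:, i], config['SMOOTHING_RADIUS'])
                            for i in range(trajectory.shape[1])])
# The correction applied to each raw transform is the smoothed-minus-raw difference.
transforms_smooth = transforms + (smoothed - trajectory)
print(transforms_smooth.shape)  # (100, 2)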
1c4612a1484861de5941c466421c93898e7ec41d
| 347
|
py
|
Python
|
dashboard/main.py
|
BOJIT/pi-dashboard
|
134c3d7b941a470630aceed4e69b8735bcfcebfd
|
[
"MIT"
] | null | null | null |
dashboard/main.py
|
BOJIT/pi-dashboard
|
134c3d7b941a470630aceed4e69b8735bcfcebfd
|
[
"MIT"
] | null | null | null |
dashboard/main.py
|
BOJIT/pi-dashboard
|
134c3d7b941a470630aceed4e69b8735bcfcebfd
|
[
"MIT"
] | null | null | null |
"""
Copyright (c)
Author: James Bennion-Pedley
Date: 2021 - present
Licence: MIT
"""
# from dashboard import app
from flask import Blueprint, render_template
from flask_login import login_required, current_user
main = Blueprint('main', __name__)
# Home page
@main.route('/')
@login_required
def index():
return render_template('index.html')
| 16.52381
| 52
| 0.752161
|
from flask import Blueprint, render_template
from flask_login import login_required, current_user
main = Blueprint('main', __name__)
@main.route('/')
@login_required
def index():
return render_template('index.html')
| true
| true
|
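The dashboard record above only defines the blueprint and a login-protected index route. As a hedged sketch of how such a blueprint is typically wired into an application, here is a hypothetical Flask app factory; the factory name, the placeholder secret key, and the omission of the flask_login setup are all assumptions made to keep the example self-contained.

# Hypothetical sketch (not part of the record): registering a blueprint like the
# one above on a Flask application.
from flask import Flask, Blueprint, render_template

main = Blueprint('main', __name__)

@main.route('/')
def index():
    return render_template('index.html')

def create_app():
    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'change-me'  # placeholder value
    app.register_blueprint(main)            # standard Flask blueprint registration
    return app

if __name__ == '__main__':
    create_app().run(debug=True)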
1c461452d26499a8ba2aa4b2b235a47f6a1e796d
| 5,474
|
py
|
Python
|
project/S17-IO-3012/code/bin/benchmark_replicas_import.py
|
suunni/sp17-i524
|
42dd11b914c03c741dad8a8505c3e091dc6ec412
|
[
"Apache-2.0"
] | 2
|
2020-10-30T09:54:25.000Z
|
2021-12-14T19:13:18.000Z
|
project/S17-IO-3012/code/bin/benchmark_replicas_import.py
|
cloudmesh/sp17-i524
|
42dd11b914c03c741dad8a8505c3e091dc6ec412
|
[
"Apache-2.0"
] | 98
|
2017-01-19T04:24:02.000Z
|
2017-10-27T11:30:50.000Z
|
project/S17-IO-3012/code/bin/benchmark_replicas_import.py
|
cloudmesh/sp17-i524
|
42dd11b914c03c741dad8a8505c3e091dc6ec412
|
[
"Apache-2.0"
] | 294
|
2017-01-09T13:18:39.000Z
|
2018-07-13T01:32:24.000Z
|
import matplotlib.pyplot as plt
import sys
import pandas as pd
def get_parm():
"""retrieves mandatory parameter to program
@param: none
@type: n/a
"""
try:
return sys.argv[1]
except:
print ('Must enter file name as parameter')
exit()
def read_file(filename):
"""reads a file into a pandas dataframe
@param: filename The name of the file to read
@type: string
"""
try:
return pd.read_csv(filename)
except:
print ('Error retrieving file')
exit()
def select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica):
benchmark_df = benchmark_df[benchmark_df.mongo_version == 34]
benchmark_df = benchmark_df[benchmark_df.test_size == "large"]
if cloud != 'X':
benchmark_df = benchmark_df[benchmark_df.cloud == cloud]
if config_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.config_replicas == config_replicas]
if mongos_instances != 'X':
benchmark_df = benchmark_df[benchmark_df.mongos_instances == mongos_instances]
if shard_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.shard_replicas == shard_replicas]
if shards_per_replica != 'X':
benchmark_df = benchmark_df[benchmark_df.shards_per_replica == shards_per_replica]
# benchmark_df1 = benchmark_df.groupby(['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica']).mean()
# http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
benchmark_df = benchmark_df.groupby(
['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica'], as_index=False).mean()
# http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
# print benchmark_df1['shard_replicas']
# print benchmark_df1
# print benchmark_df
benchmark_df = benchmark_df.sort_values(by='shard_replicas', ascending=1)
return benchmark_df
def make_figure(import_seconds_kilo, replicas_kilo, import_seconds_chameleon, replicas_chameleon, import_seconds_jetstream, replicas_jetstream):
"""formats and creates a line chart
@param1: import_seconds_kilo Array with import_seconds from kilo
@type: numpy array
@param2: replicas_kilo Array with replicas from kilo
@type: numpy array
@param3: import_seconds_chameleon Array with import_seconds from chameleon
@type: numpy array
@param4: replicas_chameleon Array with replicas from chameleon
@type: numpy array
"""
fig = plt.figure()
#plt.title('Average Mongoimport Runtime by Shard Replication Factor')
plt.ylabel('Runtime in Seconds')
plt.xlabel('Degree of Replication Per Set')
# Make the chart
plt.plot(replicas_kilo, import_seconds_kilo, label='Kilo Cloud')
plt.plot(replicas_chameleon, import_seconds_chameleon, label='Chameleon Cloud')
plt.plot(replicas_jetstream, import_seconds_jetstream, label='Jetstream Cloud')
# http://stackoverflow.com/questions/11744990/how-to-set-auto-for-upper-limit-but-keep-a-fixed-lower-limit-with-matplotlib
plt.ylim(ymin=0)
plt.legend(loc='best')
# Show the chart (for testing)
# plt.show()
# Save the chart
fig.savefig('../report/replica_import.png')
# Run the program by calling the functions
if __name__ == "__main__":
filename = get_parm()
benchmark_df = read_file(filename)
cloud = 'kilo'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
# percentage death=\
import_seconds_kilo = select_df.as_matrix(columns=[select_df.columns[6]])
replicas_kilo = select_df.as_matrix(columns=[select_df.columns[4]])
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
cloud = 'chameleon'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
# percentage death=\
import_seconds_chameleon = select_df.as_matrix(columns=[select_df.columns[6]])
replicas_chameleon = select_df.as_matrix(columns=[select_df.columns[4]])
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
cloud = 'jetstream'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
# percentage death=\
import_seconds_jetstream = select_df.as_matrix(columns=[select_df.columns[6]])
replicas_jetstream = select_df.as_matrix(columns=[select_df.columns[4]])
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
make_figure(import_seconds_kilo, replicas_kilo, import_seconds_chameleon, replicas_chameleon, import_seconds_jetstream, replicas_jetstream)
| 38.013889
| 144
| 0.735842
|
import matplotlib.pyplot as plt
import sys
import pandas as pd
def get_parm():
try:
return sys.argv[1]
except:
print ('Must enter file name as parameter')
exit()
def read_file(filename):
try:
return pd.read_csv(filename)
except:
print ('Error retrieving file')
exit()
def select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica):
benchmark_df = benchmark_df[benchmark_df.mongo_version == 34]
benchmark_df = benchmark_df[benchmark_df.test_size == "large"]
if cloud != 'X':
benchmark_df = benchmark_df[benchmark_df.cloud == cloud]
if config_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.config_replicas == config_replicas]
if mongos_instances != 'X':
benchmark_df = benchmark_df[benchmark_df.mongos_instances == mongos_instances]
if shard_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.shard_replicas == shard_replicas]
if shards_per_replica != 'X':
benchmark_df = benchmark_df[benchmark_df.shards_per_replica == shards_per_replica]
benchmark_df = benchmark_df.groupby(
['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica'], as_index=False).mean()
benchmark_df = benchmark_df.sort_values(by='shard_replicas', ascending=1)
return benchmark_df
def make_figure(import_seconds_kilo, replicas_kilo, import_seconds_chameleon, replicas_chameleon, import_seconds_jetstream, replicas_jetstream):
fig = plt.figure()
plt.ylabel('Runtime in Seconds')
plt.xlabel('Degree of Replication Per Set')
plt.plot(replicas_kilo, import_seconds_kilo, label='Kilo Cloud')
plt.plot(replicas_chameleon, import_seconds_chameleon, label='Chameleon Cloud')
plt.plot(replicas_jetstream, import_seconds_jetstream, label='Jetstream Cloud')
plt.ylim(ymin=0)
plt.legend(loc='best')
fig.savefig('../report/replica_import.png')
if __name__ == "__main__":
filename = get_parm()
benchmark_df = read_file(filename)
cloud = 'kilo'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
import_seconds_kilo = select_df.as_matrix(columns=[select_df.columns[6]])
replicas_kilo = select_df.as_matrix(columns=[select_df.columns[4]])
cloud = 'chameleon'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
import_seconds_chameleon = select_df.as_matrix(columns=[select_df.columns[6]])
replicas_chameleon = select_df.as_matrix(columns=[select_df.columns[4]])
cloud = 'jetstream'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
import_seconds_jetstream = select_df.as_matrix(columns=[select_df.columns[6]])
replicas_jetstream = select_df.as_matrix(columns=[select_df.columns[4]])
make_figure(import_seconds_kilo, replicas_kilo, import_seconds_chameleon, replicas_chameleon, import_seconds_jetstream, replicas_jetstream)
| true
| true
|
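A note on the benchmark record above: DataFrame.as_matrix() was deprecated and later removed from pandas, so the script only runs on pandas versions of its era. The sketch below shows the equivalent groupby and column selection with to_numpy() on invented data; the column names mirror the record, but the values are made up for illustration.

import pandas as pd

# Hypothetical data standing in for the benchmark CSV.
df = pd.DataFrame({
    'cloud': ['kilo', 'kilo', 'chameleon'],
    'shard_replicas': [1, 1, 1],
    'shards_per_replica': [1, 2, 1],
    'import_seconds': [120.0, 95.0, 110.0],
})
grouped = df.groupby(['cloud', 'shards_per_replica'], as_index=False).mean(numeric_only=True)
# DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the modern equivalent.
import_seconds = grouped[['import_seconds']].to_numpy()
replicas = grouped[['shards_per_replica']].to_numpy()
print(import_seconds.ravel(), replicas.ravel())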
1c461466a808f85ad09eb1de51759f22e737153d
| 10,277
|
py
|
Python
|
sdk/examples/intkey_python/dgt_intkey/client_cli/intkey_cli.py
|
DGT-Network/DGT-SDK
|
3413ae22e79c13e71264271fa3f82203fd49f0b3
|
[
"Apache-2.0"
] | null | null | null |
sdk/examples/intkey_python/dgt_intkey/client_cli/intkey_cli.py
|
DGT-Network/DGT-SDK
|
3413ae22e79c13e71264271fa3f82203fd49f0b3
|
[
"Apache-2.0"
] | null | null | null |
sdk/examples/intkey_python/dgt_intkey/client_cli/intkey_cli.py
|
DGT-Network/DGT-SDK
|
3413ae22e79c13e71264271fa3f82203fd49f0b3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016, 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import argparse
import getpass
import logging
import os
import sys
import traceback
import pkg_resources
from colorlog import ColoredFormatter
from dgt_intkey.client_cli.generate import add_generate_parser
from dgt_intkey.client_cli.generate import do_generate
from dgt_intkey.client_cli.populate import add_populate_parser
from dgt_intkey.client_cli.populate import do_populate
from dgt_intkey.client_cli.create_batch import add_create_batch_parser
from dgt_intkey.client_cli.create_batch import do_create_batch
from dgt_intkey.client_cli.load import add_load_parser
from dgt_intkey.client_cli.load import do_load
from dgt_intkey.client_cli.intkey_workload import add_workload_parser
from dgt_intkey.client_cli.intkey_workload import do_workload
from dgt_intkey.client_cli.intkey_client import IntkeyClient
from dgt_intkey.client_cli.exceptions import IntKeyCliException
from dgt_intkey.client_cli.exceptions import IntkeyClientException
DISTRIBUTION_NAME = 'dgt-intkey'
DEFAULT_URL = 'http://127.0.0.1:8008'
def create_console_handler(verbose_level):
clog = logging.StreamHandler()
formatter = ColoredFormatter(
"%(log_color)s[%(asctime)s %(levelname)-8s%(module)s]%(reset)s "
"%(white)s%(message)s",
datefmt="%H:%M:%S",
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
})
clog.setFormatter(formatter)
if verbose_level == 0:
clog.setLevel(logging.WARN)
elif verbose_level == 1:
clog.setLevel(logging.INFO)
else:
clog.setLevel(logging.DEBUG)
return clog
def setup_loggers(verbose_level):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(create_console_handler(verbose_level))
def create_parent_parser(prog_name):
parent_parser = argparse.ArgumentParser(prog=prog_name, add_help=False)
parent_parser.add_argument(
'-v', '--verbose',
action='count',
help='enable more verbose output')
try:
version = pkg_resources.get_distribution(DISTRIBUTION_NAME).version
except pkg_resources.DistributionNotFound:
version = 'UNKNOWN'
parent_parser.add_argument(
'-V', '--version',
action='version',
version=(DISTRIBUTION_NAME + ' (Hyperledger Sawtooth) version {}')
.format(version),
help='display version information')
return parent_parser
def create_parser(prog_name):
parent_parser = create_parent_parser(prog_name)
parser = argparse.ArgumentParser(
parents=[parent_parser],
formatter_class=argparse.RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers(title='subcommands', dest='command')
add_set_parser(subparsers, parent_parser)
add_inc_parser(subparsers, parent_parser)
add_dec_parser(subparsers, parent_parser)
add_show_parser(subparsers, parent_parser)
add_list_parser(subparsers, parent_parser)
add_generate_parser(subparsers, parent_parser)
add_load_parser(subparsers, parent_parser)
add_populate_parser(subparsers, parent_parser)
add_create_batch_parser(subparsers, parent_parser)
add_workload_parser(subparsers, parent_parser)
return parser
def add_set_parser(subparsers, parent_parser):
message = 'Sends an intkey transaction to set <name> to <value>.'
parser = subparsers.add_parser(
'set',
parents=[parent_parser],
description=message,
help='Sets an intkey value')
parser.add_argument(
'name',
type=str,
help='name of key to set')
parser.add_argument(
'value',
type=int,
help='amount to set')
parser.add_argument(
'--url',
type=str,
help='specify URL of REST API')
parser.add_argument(
'--keyfile',
type=str,
help="identify file containing user's private key")
parser.add_argument(
'--wait',
nargs='?',
const=sys.maxsize,
type=int,
help='set time, in seconds, to wait for transaction to commit')
def do_set(args):
name, value, wait = args.name, args.value, args.wait
client = _get_client(args)
response = client.set(name, value, wait)
print(response)
def add_inc_parser(subparsers, parent_parser):
message = 'Sends an intkey transaction to increment <name> by <value>.'
parser = subparsers.add_parser(
'inc',
parents=[parent_parser],
description=message,
help='Increments an intkey value')
parser.add_argument(
'name',
type=str,
help='identify name of key to increment')
parser.add_argument(
'value',
type=int,
help='specify amount to increment')
parser.add_argument(
'--url',
type=str,
help='specify URL of REST API')
parser.add_argument(
'--keyfile',
type=str,
help="identify file containing user's private key")
parser.add_argument(
'--wait',
nargs='?',
const=sys.maxsize,
type=int,
help='set time, in seconds, to wait for transaction to commit')
def do_inc(args):
name, value, wait = args.name, args.value, args.wait
client = _get_client(args)
response = client.inc(name, value, wait)
print(response)
def add_dec_parser(subparsers, parent_parser):
message = 'Sends an intkey transaction to decrement <name> by <value>.'
parser = subparsers.add_parser(
'dec',
parents=[parent_parser],
description=message,
help='Decrements an intkey value')
parser.add_argument(
'name',
type=str,
help='identify name of key to decrement')
parser.add_argument(
'value',
type=int,
help='amount to decrement')
parser.add_argument(
'--url',
type=str,
help='specify URL of REST API')
parser.add_argument(
'--keyfile',
type=str,
help="identify file containing user's private key")
parser.add_argument(
'--wait',
nargs='?',
const=sys.maxsize,
type=int,
help='set time, in seconds, to wait for transaction to commit')
def do_dec(args):
name, value, wait = args.name, args.value, args.wait
client = _get_client(args)
response = client.dec(name, value, wait)
print(response)
def add_show_parser(subparsers, parent_parser):
message = 'Shows the value of the key <name>.'
parser = subparsers.add_parser(
'show',
parents=[parent_parser],
description=message,
help='Displays the specified intkey value')
parser.add_argument(
'name',
type=str,
help='name of key to show')
parser.add_argument(
'--url',
type=str,
help='specify URL of REST API')
def do_show(args):
name = args.name
client = _get_client(args)
value = client.show(name)
print('{}: {}'.format(name, value))
def add_list_parser(subparsers, parent_parser):
message = 'Shows the values of all keys in intkey state.'
parser = subparsers.add_parser(
'list',
parents=[parent_parser],
description=message,
help='Displays all intkey values')
parser.add_argument(
'--url',
type=str,
help='specify URL of REST API')
def do_list(args):
client = _get_client(args)
results = client.list()
for pair in results:
for name, value in pair.items():
print('{}: {}'.format(name, value))
def _get_client(args):
return IntkeyClient(
url=DEFAULT_URL if args.url is None else args.url,
keyfile=_get_keyfile(args))
def _get_keyfile(args):
try:
if args.keyfile is not None:
return args.keyfile
except AttributeError:
return None
real_user = getpass.getuser()
home = os.path.expanduser("~")
key_dir = os.path.join(home, ".sawtooth", "keys")
return '{}/{}.priv'.format(key_dir, real_user)
def main(prog_name=os.path.basename(sys.argv[0]), args=None):
if args is None:
args = sys.argv[1:]
parser = create_parser(prog_name)
args = parser.parse_args(args)
if args.verbose is None:
verbose_level = 0
else:
verbose_level = args.verbose
setup_loggers(verbose_level=verbose_level)
if not args.command:
parser.print_help()
sys.exit(1)
if args.command == 'set':
do_set(args)
elif args.command == 'inc':
do_inc(args)
elif args.command == 'dec':
do_dec(args)
elif args.command == 'show':
do_show(args)
elif args.command == 'list':
do_list(args)
elif args.command == 'generate':
do_generate(args)
elif args.command == 'populate':
do_populate(args)
elif args.command == 'load':
do_load(args)
elif args.command == 'create_batch':
do_create_batch(args)
elif args.command == 'workload':
do_workload(args)
else:
raise IntKeyCliException("invalid command: {}".format(args.command))
def main_wrapper():
# pylint: disable=bare-except
try:
main()
except (IntKeyCliException, IntkeyClientException) as err:
print("Error: {}".format(err), file=sys.stderr)
sys.exit(1)
except KeyboardInterrupt:
pass
except SystemExit as e:
raise e
except:
traceback.print_exc(file=sys.stderr)
sys.exit(1)
| 26.763021
| 80
| 0.648827
|
import argparse
import getpass
import logging
import os
import sys
import traceback
import pkg_resources
from colorlog import ColoredFormatter
from dgt_intkey.client_cli.generate import add_generate_parser
from dgt_intkey.client_cli.generate import do_generate
from dgt_intkey.client_cli.populate import add_populate_parser
from dgt_intkey.client_cli.populate import do_populate
from dgt_intkey.client_cli.create_batch import add_create_batch_parser
from dgt_intkey.client_cli.create_batch import do_create_batch
from dgt_intkey.client_cli.load import add_load_parser
from dgt_intkey.client_cli.load import do_load
from dgt_intkey.client_cli.intkey_workload import add_workload_parser
from dgt_intkey.client_cli.intkey_workload import do_workload
from dgt_intkey.client_cli.intkey_client import IntkeyClient
from dgt_intkey.client_cli.exceptions import IntKeyCliException
from dgt_intkey.client_cli.exceptions import IntkeyClientException
DISTRIBUTION_NAME = 'dgt-intkey'
DEFAULT_URL = 'http://127.0.0.1:8008'
def create_console_handler(verbose_level):
clog = logging.StreamHandler()
formatter = ColoredFormatter(
"%(log_color)s[%(asctime)s %(levelname)-8s%(module)s]%(reset)s "
"%(white)s%(message)s",
datefmt="%H:%M:%S",
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
})
clog.setFormatter(formatter)
if verbose_level == 0:
clog.setLevel(logging.WARN)
elif verbose_level == 1:
clog.setLevel(logging.INFO)
else:
clog.setLevel(logging.DEBUG)
return clog
def setup_loggers(verbose_level):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(create_console_handler(verbose_level))
def create_parent_parser(prog_name):
parent_parser = argparse.ArgumentParser(prog=prog_name, add_help=False)
parent_parser.add_argument(
'-v', '--verbose',
action='count',
help='enable more verbose output')
try:
version = pkg_resources.get_distribution(DISTRIBUTION_NAME).version
except pkg_resources.DistributionNotFound:
version = 'UNKNOWN'
parent_parser.add_argument(
'-V', '--version',
action='version',
version=(DISTRIBUTION_NAME + ' (Hyperledger Sawtooth) version {}')
.format(version),
help='display version information')
return parent_parser
def create_parser(prog_name):
parent_parser = create_parent_parser(prog_name)
parser = argparse.ArgumentParser(
parents=[parent_parser],
formatter_class=argparse.RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers(title='subcommands', dest='command')
add_set_parser(subparsers, parent_parser)
add_inc_parser(subparsers, parent_parser)
add_dec_parser(subparsers, parent_parser)
add_show_parser(subparsers, parent_parser)
add_list_parser(subparsers, parent_parser)
add_generate_parser(subparsers, parent_parser)
add_load_parser(subparsers, parent_parser)
add_populate_parser(subparsers, parent_parser)
add_create_batch_parser(subparsers, parent_parser)
add_workload_parser(subparsers, parent_parser)
return parser
def add_set_parser(subparsers, parent_parser):
message = 'Sends an intkey transaction to set <name> to <value>.'
parser = subparsers.add_parser(
'set',
parents=[parent_parser],
description=message,
help='Sets an intkey value')
parser.add_argument(
'name',
type=str,
help='name of key to set')
parser.add_argument(
'value',
type=int,
help='amount to set')
parser.add_argument(
'--url',
type=str,
help='specify URL of REST API')
parser.add_argument(
'--keyfile',
type=str,
help="identify file containing user's private key")
parser.add_argument(
'--wait',
nargs='?',
const=sys.maxsize,
type=int,
help='set time, in seconds, to wait for transaction to commit')
def do_set(args):
name, value, wait = args.name, args.value, args.wait
client = _get_client(args)
response = client.set(name, value, wait)
print(response)
def add_inc_parser(subparsers, parent_parser):
message = 'Sends an intkey transaction to increment <name> by <value>.'
parser = subparsers.add_parser(
'inc',
parents=[parent_parser],
description=message,
help='Increments an intkey value')
parser.add_argument(
'name',
type=str,
help='identify name of key to increment')
parser.add_argument(
'value',
type=int,
help='specify amount to increment')
parser.add_argument(
'--url',
type=str,
help='specify URL of REST API')
parser.add_argument(
'--keyfile',
type=str,
help="identify file containing user's private key")
parser.add_argument(
'--wait',
nargs='?',
const=sys.maxsize,
type=int,
help='set time, in seconds, to wait for transaction to commit')
def do_inc(args):
name, value, wait = args.name, args.value, args.wait
client = _get_client(args)
response = client.inc(name, value, wait)
print(response)
def add_dec_parser(subparsers, parent_parser):
message = 'Sends an intkey transaction to decrement <name> by <value>.'
parser = subparsers.add_parser(
'dec',
parents=[parent_parser],
description=message,
help='Decrements an intkey value')
parser.add_argument(
'name',
type=str,
help='identify name of key to decrement')
parser.add_argument(
'value',
type=int,
help='amount to decrement')
parser.add_argument(
'--url',
type=str,
help='specify URL of REST API')
parser.add_argument(
'--keyfile',
type=str,
help="identify file containing user's private key")
parser.add_argument(
'--wait',
nargs='?',
const=sys.maxsize,
type=int,
help='set time, in seconds, to wait for transaction to commit')
def do_dec(args):
name, value, wait = args.name, args.value, args.wait
client = _get_client(args)
response = client.dec(name, value, wait)
print(response)
def add_show_parser(subparsers, parent_parser):
message = 'Shows the value of the key <name>.'
parser = subparsers.add_parser(
'show',
parents=[parent_parser],
description=message,
help='Displays the specified intkey value')
parser.add_argument(
'name',
type=str,
help='name of key to show')
parser.add_argument(
'--url',
type=str,
help='specify URL of REST API')
def do_show(args):
name = args.name
client = _get_client(args)
value = client.show(name)
print('{}: {}'.format(name, value))
def add_list_parser(subparsers, parent_parser):
message = 'Shows the values of all keys in intkey state.'
parser = subparsers.add_parser(
'list',
parents=[parent_parser],
description=message,
help='Displays all intkey values')
parser.add_argument(
'--url',
type=str,
help='specify URL of REST API')
def do_list(args):
client = _get_client(args)
results = client.list()
for pair in results:
for name, value in pair.items():
print('{}: {}'.format(name, value))
def _get_client(args):
return IntkeyClient(
url=DEFAULT_URL if args.url is None else args.url,
keyfile=_get_keyfile(args))
def _get_keyfile(args):
try:
if args.keyfile is not None:
return args.keyfile
except AttributeError:
return None
real_user = getpass.getuser()
home = os.path.expanduser("~")
key_dir = os.path.join(home, ".sawtooth", "keys")
return '{}/{}.priv'.format(key_dir, real_user)
def main(prog_name=os.path.basename(sys.argv[0]), args=None):
if args is None:
args = sys.argv[1:]
parser = create_parser(prog_name)
args = parser.parse_args(args)
if args.verbose is None:
verbose_level = 0
else:
verbose_level = args.verbose
setup_loggers(verbose_level=verbose_level)
if not args.command:
parser.print_help()
sys.exit(1)
if args.command == 'set':
do_set(args)
elif args.command == 'inc':
do_inc(args)
elif args.command == 'dec':
do_dec(args)
elif args.command == 'show':
do_show(args)
elif args.command == 'list':
do_list(args)
elif args.command == 'generate':
do_generate(args)
elif args.command == 'populate':
do_populate(args)
elif args.command == 'load':
do_load(args)
elif args.command == 'create_batch':
do_create_batch(args)
elif args.command == 'workload':
do_workload(args)
else:
raise IntKeyCliException("invalid command: {}".format(args.command))
def main_wrapper():
# pylint: disable=bare-except
try:
main()
except (IntKeyCliException, IntkeyClientException) as err:
print("Error: {}".format(err), file=sys.stderr)
sys.exit(1)
except KeyboardInterrupt:
pass
except SystemExit as e:
raise e
except:
traceback.print_exc(file=sys.stderr)
sys.exit(1)
| true
| true
|
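The intkey CLI record above dispatches subcommands through a long if/elif chain in main(). A common alternative is to attach each handler via set_defaults(func=...); the sketch below illustrates that pattern in miniature with invented command names, and is not part of the DGT/Sawtooth code.

import argparse
import sys

# Minimal sketch of argparse subcommand dispatch; handlers and names are invented.
def do_set(args):
    print('set {} = {}'.format(args.name, args.value))

def do_show(args):
    print('show {}'.format(args.name))

def main(argv=None):
    parser = argparse.ArgumentParser(prog='mini-cli')
    subparsers = parser.add_subparsers(dest='command')

    set_parser = subparsers.add_parser('set', help='set a value')
    set_parser.add_argument('name')
    set_parser.add_argument('value', type=int)
    set_parser.set_defaults(func=do_set)   # attach the handler instead of an if/elif chain

    show_parser = subparsers.add_parser('show', help='show a value')
    show_parser.add_argument('name')
    show_parser.set_defaults(func=do_show)

    args = parser.parse_args(argv)
    if not args.command:
        parser.print_help()
        sys.exit(1)
    args.func(args)

if __name__ == '__main__':
    main()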
1c46148594b66e51e3b670cc5e04060e21b3f2a6
| 1,581
|
py
|
Python
|
test_config.py
|
AshishMittal/watson-stt-wer-python
|
62dea234665aa5c11a05327e49419d27b87f1b25
|
[
"Apache-2.0"
] | 3
|
2021-06-17T14:19:44.000Z
|
2022-02-27T18:13:51.000Z
|
test_config.py
|
AshishMittal/watson-stt-wer-python
|
62dea234665aa5c11a05327e49419d27b87f1b25
|
[
"Apache-2.0"
] | 22
|
2021-06-04T13:18:10.000Z
|
2022-02-11T21:55:45.000Z
|
test_config.py
|
AshishMittal/watson-stt-wer-python
|
62dea234665aa5c11a05327e49419d27b87f1b25
|
[
"Apache-2.0"
] | 2
|
2021-07-15T19:43:36.000Z
|
2022-02-23T09:56:47.000Z
|
import unittest, os
from config import Config
def getInstance():
return Config('config.ini.sample')
class MyTest(unittest.TestCase):
def test_get_value(self):
c = getInstance()
self.assertEqual(c.getValue('SpeechToText','base_model_name'), 'en-US_NarrowbandModel')
def test_get_missing_section(self):
c = getInstance()
self.assertEqual(c.getValue('NotARealSection','NotARealKey'), None)
def test_get_missing_key(self):
c = getInstance()
self.assertEqual(c.getValue('SpeechToText', 'NotARealKey'), None)
def test_get_boolean_false(self):
c = getInstance()
self.assertEqual(c.getBoolean('SpeechToText', 'use_bearer_token'), False)
def test_get_boolean_true(self):
c = getInstance()
self.assertEqual(c.getBoolean('Transformations', 'remove_empty_strings'), True)
def test_get_value_with_percent(self):
c = getInstance()
self.assertEqual(c.getValue('Transformations','remove_word_list'), 'uh,uhuh,%hesitation,hesitation')
def test_set_value_with_key(self):
c = getInstance()
c.setValue('SpeechToText','smart_formatting', 'True')
self.assertEqual(c.getValue('SpeechToText', 'smart_formatting'), 'True')
def test_write_file(self):
c = getInstance()
c.writeFile('config.ini.unit_test')
self.assertEqual(Config('config.ini.unit_test').getValue('SpeechToText','base_model_name'), 'en-US_NarrowbandModel')
os.remove('config.ini.unit_test')
if __name__ == '__main__':
unittest.main()
| 33.638298
| 124
| 0.683112
|
import unittest, os
from config import Config
def getInstance():
return Config('config.ini.sample')
class MyTest(unittest.TestCase):
def test_get_value(self):
c = getInstance()
self.assertEqual(c.getValue('SpeechToText','base_model_name'), 'en-US_NarrowbandModel')
def test_get_missing_section(self):
c = getInstance()
self.assertEqual(c.getValue('NotARealSection','NotARealKey'), None)
def test_get_missing_key(self):
c = getInstance()
self.assertEqual(c.getValue('SpeechToText', 'NotARealKey'), None)
def test_get_boolean_false(self):
c = getInstance()
self.assertEqual(c.getBoolean('SpeechToText', 'use_bearer_token'), False)
def test_get_boolean_true(self):
c = getInstance()
self.assertEqual(c.getBoolean('Transformations', 'remove_empty_strings'), True)
def test_get_value_with_percent(self):
c = getInstance()
self.assertEqual(c.getValue('Transformations','remove_word_list'), 'uh,uhuh,%hesitation,hesitation')
def test_set_value_with_key(self):
c = getInstance()
c.setValue('SpeechToText','smart_formatting', 'True')
self.assertEqual(c.getValue('SpeechToText', 'smart_formatting'), 'True')
def test_write_file(self):
c = getInstance()
c.writeFile('config.ini.unit_test')
self.assertEqual(Config('config.ini.unit_test').getValue('SpeechToText','base_model_name'), 'en-US_NarrowbandModel')
os.remove('config.ini.unit_test')
if __name__ == '__main__':
unittest.main()
| true
| true
|
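The tests in the record above imply a Config wrapper whose getValue/getBoolean return None (or False) for missing sections and keys and which tolerates a literal '%' in values. The real config.py is not included in the record, so the following is only a plausible sketch built on configparser with interpolation disabled; every method name simply mirrors what the tests call.

import configparser

# Hypothetical sketch of a Config wrapper consistent with the tests above.
class Config:
    def __init__(self, filename):
        # interpolation=None lets values such as '%hesitation' pass through unchanged.
        self.parser = configparser.ConfigParser(interpolation=None)
        self.parser.read(filename)

    def getValue(self, section, key):
        # Return None instead of raising when the section or key is missing.
        if not self.parser.has_option(section, key):
            return None
        return self.parser.get(section, key)

    def getBoolean(self, section, key):
        value = self.getValue(section, key)
        return value is not None and value.strip().lower() in ('1', 'true', 'yes', 'on')

    def setValue(self, section, key, value):
        if not self.parser.has_section(section):
            self.parser.add_section(section)
        self.parser.set(section, key, value)

    def writeFile(self, filename):
        with open(filename, 'w') as handle:
            self.parser.write(handle)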
1c46177306b899ada2c53a4c9fa5cec25807641b
| 12,569
|
py
|
Python
|
harmonica/equivalent_layer/harmonic_spherical.py
|
RichardScottOZ/harmonica
|
ccb0437ea0ed528cfd144844edab98141c8d08da
|
[
"BSD-3-Clause"
] | null | null | null |
harmonica/equivalent_layer/harmonic_spherical.py
|
RichardScottOZ/harmonica
|
ccb0437ea0ed528cfd144844edab98141c8d08da
|
[
"BSD-3-Clause"
] | 1
|
2022-01-19T03:02:22.000Z
|
2022-01-19T20:47:19.000Z
|
harmonica/equivalent_layer/harmonic_spherical.py
|
RichardScottOZ/harmonica
|
ccb0437ea0ed528cfd144844edab98141c8d08da
|
[
"BSD-3-Clause"
] | 1
|
2022-01-17T23:15:18.000Z
|
2022-01-17T23:15:18.000Z
|
"""
Equivalent layer for generic harmonic functions in spherical coordinates
"""
import numpy as np
from numba import jit
from sklearn.utils.validation import check_is_fitted
import verde as vd
import verde.base as vdb
from .utils import jacobian_numba, predict_numba, pop_extra_coords
from ..forward.utils import distance_spherical
class EQLHarmonicSpherical(vdb.BaseGridder):
r"""
Equivalent-layer for generic harmonic functions in spherical coordinates
This equivalent layer can be used for:
* Spherical coordinates (geographic coordinates must be converted before
use)
* Regional or global data where Earth's curvature must be taken into
account
* Gravity and magnetic data (including derivatives)
* Single data types
* Interpolation
* Upward continuation
* Finite-difference based derivative calculations
It cannot be used for:
* Joint inversion of multiple data types (e.g., gravity + gravity
gradients)
* Reduction to the pole of magnetic total field anomaly data
* Analytical derivative calculations
Point sources are located beneath the observed potential-field measurement
points by default [Cooper2000]_. Custom source locations can be used by
specifying the *points* argument. Coefficients associated with each point
source are estimated through linear least-squares with damping (Tikhonov
0th order) regularization.
The Green's function for point mass effects used is the inverse Euclidean
distance between the grid coordinates and the point source:
.. math::
\phi(\bar{x}, \bar{x}') = \frac{1}{||\bar{x} - \bar{x}'||}
where :math:`\bar{x}` and :math:`\bar{x}'` are the coordinate vectors of
the observation point and the source, respectively.
Parameters
----------
damping : None or float
The positive damping regularization parameter. Controls how much
smoothness is imposed on the estimated coefficients.
If None, no regularization is used.
points : None or list of arrays (optional)
List containing the coordinates of the point sources used as the
equivalent layer. Coordinates are assumed to be in the following order:
(``longitude``, ``latitude``, ``radius``). Both ``longitude`` and
``latitude`` must be in degrees and ``radius`` in meters.
If None, will place one point source below each observation point at
a fixed relative depth below the observation point [Cooper2000]_.
Defaults to None.
relative_depth : float
Relative depth at which the point sources are placed beneath the
observation points. Each source point will be set beneath each data
point at a depth calculated as the radius of the data point minus
this constant *relative_depth*. Use positive numbers (negative numbers
would mean point sources are above the data points). Ignored if
*points* is specified.
Attributes
----------
points_ : 2d-array
Coordinates of the point sources used to build the equivalent layer.
coefs_ : array
Estimated coefficients of every point source.
region_ : tuple
The boundaries (``[W, E, S, N]``) of the data used to fit the
interpolator. Used as the default region for the
:meth:`~harmonica.EQLHarmonicSpherical.grid` method.
"""
# Set the default dimension names for generated outputs
# as xr.Dataset.
dims = ("spherical_latitude", "longitude")
# Overwrite the default name for the upward coordinate.
extra_coords_name = "radius"
def __init__(
self,
damping=None,
points=None,
relative_depth=500,
):
self.damping = damping
self.points = points
self.relative_depth = relative_depth
# Define Green's function for spherical coordinates
self.greens_function = greens_func_spherical
def fit(self, coordinates, data, weights=None):
"""
Fit the coefficients of the equivalent layer.
The data region is captured and used as default for the
:meth:`~harmonica.EQLHarmonicSpherical.grid` method.
All input arrays must have the same shape.
Parameters
----------
coordinates : tuple of arrays
Arrays with the coordinates of each data point. Should be in the
following order: (``longitude``, ``latitude``, ``radius``, ...).
Only ``longitude``, ``latitude``, and ``radius`` will be used, all
subsequent coordinates will be ignored.
data : array
The data values of each data point.
weights : None or array
If not None, then the weights assigned to each data point.
Typically, this should be 1 over the data uncertainty squared.
Returns
-------
self
Returns this estimator instance for chaining operations.
"""
coordinates, data, weights = vdb.check_fit_input(coordinates, data, weights)
# Capture the data region to use as a default when gridding.
self.region_ = vd.get_region(coordinates[:2])
coordinates = vdb.n_1d_arrays(coordinates, 3)
if self.points is None:
self.points_ = (
coordinates[0],
coordinates[1],
coordinates[2] - self.relative_depth,
)
else:
self.points_ = vdb.n_1d_arrays(self.points, 3)
jacobian = self.jacobian(coordinates, self.points_)
self.coefs_ = vdb.least_squares(jacobian, data, weights, self.damping)
return self
def predict(self, coordinates):
"""
Evaluate the estimated equivalent layer on the given set of points.
Requires a fitted estimator
(see :meth:`~harmonica.EQLHarmonicSpherical.fit`).
Parameters
----------
coordinates : tuple of arrays
Arrays with the coordinates of each data point. Should be in the
following order: (``longitude``, ``latitude``, ``radius``, ...).
Only ``longitude``, ``latitude`` and ``radius`` will be used, all
subsequent coordinates will be ignored.
Returns
-------
data : array
The data values evaluated on the given points.
"""
# We know the gridder has been fitted if it has the coefs_
check_is_fitted(self, ["coefs_"])
shape = np.broadcast(*coordinates[:3]).shape
size = np.broadcast(*coordinates[:3]).size
dtype = coordinates[0].dtype
coordinates = tuple(np.atleast_1d(i).ravel() for i in coordinates[:3])
data = np.zeros(size, dtype=dtype)
predict_numba(
coordinates, self.points_, self.coefs_, data, self.greens_function
)
return data.reshape(shape)
def jacobian(
self, coordinates, points, dtype="float64"
): # pylint: disable=no-self-use
"""
Make the Jacobian matrix for the equivalent layer.
Each column of the Jacobian is the Green's function for a single point
source evaluated on all observation points.
Parameters
----------
coordinates : tuple of arrays
Arrays with the coordinates of each data point. Should be in the
following order: (``longitude``, ``latitude``, ``radius``, ...).
Only ``longitude``, ``latitude`` and ``radius`` will be used, all
subsequent coordinates will be ignored.
points : tuple of arrays
Tuple of arrays containing the coordinates of the point sources
used as equivalent layer in the following order:
(``longitude``, ``latitude``, ``radius``).
dtype : str or numpy dtype
The type of the Jacobian array.
Returns
-------
jacobian : 2D array
The (n_data, n_points) Jacobian matrix.
"""
# Compute Jacobian matrix
n_data = coordinates[0].size
n_points = points[0].size
jac = np.zeros((n_data, n_points), dtype=dtype)
jacobian_numba(coordinates, points, jac, self.greens_function)
return jac
def grid(
self,
upward,
region=None,
shape=None,
spacing=None,
dims=None,
data_names=None,
**kwargs
): # pylint: disable=arguments-differ
"""
Interpolate the data onto a regular grid.
The grid can be specified by either the number of points in each
dimension (the *shape*) or by the grid node spacing. See
:func:`verde.grid_coordinates` for details. All grid points will be
located at the same `upward` coordinate. Other arguments for
:func:`verde.grid_coordinates` can be passed as extra keyword arguments
(``kwargs``) to this method.
If the interpolator collected the input data region, then it will be
used if ``region=None``. Otherwise, you must specify the grid region.
Use the *dims* and *data_names* arguments to set custom names for the
dimensions and the data field(s) in the output :class:`xarray.Dataset`.
Default names will be provided if none are given.
Parameters
----------
upward : float
Upward coordinate of the grid points.
region : list = [W, E, S, N]
The west, east, south, and north boundaries of a given region.
shape : tuple = (n_north, n_east) or None
The number of points in the South-North and West-East directions,
respectively.
spacing : tuple = (s_north, s_east) or None
The grid spacing in the South-North and West-East directions,
respectively.
dims : list or None
The names of the northing and easting data dimensions,
respectively, in the output grid. Default is determined from the
``dims`` attribute of the class. Must be defined in the following
order: northing dimension, easting dimension.
**NOTE: This is an exception to the "easting" then
"northing" pattern but is required for compatibility with xarray.**
data_names : list of None
The name(s) of the data variables in the output grid. Defaults to
``['scalars']``.
Returns
-------
grid : xarray.Dataset
The interpolated grid. Metadata about the interpolator is written
to the ``attrs`` attribute.
"""
# We override the grid method from BaseGridder so it takes the upward
# coordinate as a positional argument. We disable pylint
# arguments-differ error because we intend to make this method
# different from the inherited one.
# Ignore extra_coords if passed
pop_extra_coords(kwargs)
# Grid data
# We always pass projection=None because that argument is intended to
# be used only with Cartesian gridders.
grid = super().grid(
region=region,
shape=shape,
spacing=spacing,
dims=dims,
data_names=data_names,
projection=None,
extra_coords=upward,
**kwargs,
)
return grid
def scatter(
self,
region=None,
size=None,
random_state=None,
dims=None,
data_names=None,
projection=None,
**kwargs
):
"""
.. warning ::
Not implemented method. The scatter method will be deprecated on
Verde v2.0.0.
"""
raise NotImplementedError
def profile(
self,
point1,
point2,
size,
dims=None,
data_names=None,
projection=None,
**kwargs
):
"""
.. warning ::
Not implemented method. The profile on spherical coordinates should
be done using great-circle distances through the Haversine formula.
"""
raise NotImplementedError
@jit(nopython=True)
def greens_func_spherical(
longitude, latitude, radius, point_longitude, point_latitude, point_radius
):
"""
Green's function for the equivalent layer in spherical coordinates
Uses Numba to speed up things.
"""
distance = distance_spherical(
(longitude, latitude, radius), (point_longitude, point_latitude, point_radius)
)
return 1 / distance
| 36.32659
| 86
| 0.626701
|
import numpy as np
from numba import jit
from sklearn.utils.validation import check_is_fitted
import verde as vd
import verde.base as vdb
from .utils import jacobian_numba, predict_numba, pop_extra_coords
from ..forward.utils import distance_spherical
class EQLHarmonicSpherical(vdb.BaseGridder):
dims = ("spherical_latitude", "longitude")
extra_coords_name = "radius"
def __init__(
self,
damping=None,
points=None,
relative_depth=500,
):
self.damping = damping
self.points = points
self.relative_depth = relative_depth
self.greens_function = greens_func_spherical
def fit(self, coordinates, data, weights=None):
coordinates, data, weights = vdb.check_fit_input(coordinates, data, weights)
# Capture the data region to use as a default when gridding.
self.region_ = vd.get_region(coordinates[:2])
coordinates = vdb.n_1d_arrays(coordinates, 3)
if self.points is None:
self.points_ = (
coordinates[0],
coordinates[1],
coordinates[2] - self.relative_depth,
)
else:
self.points_ = vdb.n_1d_arrays(self.points, 3)
jacobian = self.jacobian(coordinates, self.points_)
self.coefs_ = vdb.least_squares(jacobian, data, weights, self.damping)
return self
def predict(self, coordinates):
# We know the gridder has been fitted if it has the coefs_
check_is_fitted(self, ["coefs_"])
shape = np.broadcast(*coordinates[:3]).shape
size = np.broadcast(*coordinates[:3]).size
dtype = coordinates[0].dtype
coordinates = tuple(np.atleast_1d(i).ravel() for i in coordinates[:3])
data = np.zeros(size, dtype=dtype)
predict_numba(
coordinates, self.points_, self.coefs_, data, self.greens_function
)
return data.reshape(shape)
def jacobian(
self, coordinates, points, dtype="float64"
): # pylint: disable=no-self-use
# Compute Jacobian matrix
n_data = coordinates[0].size
n_points = points[0].size
jac = np.zeros((n_data, n_points), dtype=dtype)
jacobian_numba(coordinates, points, jac, self.greens_function)
return jac
def grid(
self,
upward,
region=None,
shape=None,
spacing=None,
dims=None,
data_names=None,
**kwargs
): # pylint: disable=arguments-differ
# We override the grid method from BaseGridder so it takes the upward
# coordinate as a positional argument. We disable pylint
# arguments-differ error because we intend to make this method
# different from the inherited one.
# Ignore extra_coords if passed
pop_extra_coords(kwargs)
# Grid data
# We always pass projection=None because that argument is intended to be used only with Cartesian gridders.
grid = super().grid(
region=region,
shape=shape,
spacing=spacing,
dims=dims,
data_names=data_names,
projection=None,
extra_coords=upward,
**kwargs,
)
return grid
def scatter(
self,
region=None,
size=None,
random_state=None,
dims=None,
data_names=None,
projection=None,
**kwargs
):
raise NotImplementedError
def profile(
self,
point1,
point2,
size,
dims=None,
data_names=None,
projection=None,
**kwargs
):
raise NotImplementedError
@jit(nopython=True)
def greens_func_spherical(
longitude, latitude, radius, point_longitude, point_latitude, point_radius
):
distance = distance_spherical(
(longitude, latitude, radius), (point_longitude, point_latitude, point_radius)
)
return 1 / distance
| true
| true
|
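The equivalent-layer class above estimates source coefficients by damped (Tikhonov 0th-order) least squares against a Jacobian of 1/distance Green's functions. The toy sketch below reproduces just that solve with plain numpy and Euclidean distances; harmonica's spherical-distance and Numba helpers are not used, and the point counts, relative depth, and damping value are arbitrary.

import numpy as np

# Toy illustration of the damped least-squares fit used by the equivalent layer above.
rng = np.random.default_rng(0)
obs = rng.uniform(0, 1, size=(50, 3))            # observation points
src = obs - np.array([0.0, 0.0, 0.1])            # sources at a fixed relative depth

# Jacobian: Green's function 1/||x - x'|| for every (observation, source) pair.
diff = obs[:, None, :] - src[None, :, :]
jacobian = 1.0 / np.linalg.norm(diff, axis=2)

data = rng.normal(size=50)
damping = 1e-3
# Tikhonov (0th-order) regularized normal equations: (J^T J + damping * I) c = J^T d
lhs = jacobian.T @ jacobian + damping * np.eye(jacobian.shape[1])
coefs = np.linalg.solve(lhs, jacobian.T @ data)
predicted = jacobian @ coefs
print(np.abs(predicted - data).max())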
1c4618e45d73910b099a098744c5bee6d758142c
| 18,581
|
py
|
Python
|
dali/test/python/test_operator_slice.py
|
ancientmooner/DALI
|
355e8db8130cee0d20e9ae3d698f195278544995
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2020-05-09T03:07:07.000Z
|
2021-06-15T14:48:04.000Z
|
dali/test/python/test_operator_slice.py
|
ancientmooner/DALI
|
355e8db8130cee0d20e9ae3d698f195278544995
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
dali/test/python/test_operator_slice.py
|
ancientmooner/DALI
|
355e8db8130cee0d20e9ae3d698f195278544995
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-04-26T14:59:51.000Z
|
2020-04-26T14:59:51.000Z
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import nvidia.dali as dali
from nvidia.dali.backend_impl import TensorListGPU
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
import os
from functools import partial
from test_utils import check_batch
from test_utils import compare_pipelines
from test_utils import get_dali_extra_path
from test_utils import RandomDataIterator
from math import floor
test_data_root = get_dali_extra_path()
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
test_data_video = os.path.join(test_data_root, 'db', 'optical_flow', 'sintel_trailer')
class SliceSynthDataPipeline(Pipeline):
def __init__(self, device, batch_size, layout, iterator, pos_size_iter,
num_threads=1, device_id=0, num_gpus=1,
axes=None, axis_names=None, normalized_anchor=True, normalized_shape=True):
super(SliceSynthDataPipeline, self).__init__(
batch_size, num_threads, device_id, seed=1234)
self.device = device
self.layout = layout
self.iterator = iterator
self.pos_size_iter = pos_size_iter
self.inputs = ops.ExternalSource()
self.input_crop_pos = ops.ExternalSource()
self.input_crop_size = ops.ExternalSource()
if axis_names:
self.slice = ops.Slice(device = self.device,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
axis_names = axis_names)
elif axes:
self.slice = ops.Slice(device = self.device,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
axes = axes)
else:
self.slice = ops.Slice(device = self.device,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
)
def define_graph(self):
self.data = self.inputs()
self.crop_pos = self.input_crop_pos()
self.crop_size = self.input_crop_size()
data = self.data.gpu() if self.device == 'gpu' else self.data
out = self.slice(data, self.crop_pos, self.crop_size)
return out
def iter_setup(self):
data = self.iterator.next()
self.feed_input(self.data, data, layout=self.layout)
(crop_pos, crop_size) = self.pos_size_iter.next()
self.feed_input(self.crop_pos, crop_pos)
self.feed_input(self.crop_size, crop_size)
class SlicePipeline(Pipeline):
def __init__(self, device, batch_size, pos_size_iter,
num_threads=1, device_id=0, is_fused_decoder=False,
axes=None, axis_names=None, normalized_anchor=True, normalized_shape=True):
super(SlicePipeline, self).__init__(
batch_size, num_threads, device_id, seed=1234)
self.is_fused_decoder = is_fused_decoder
self.pos_size_iter = pos_size_iter
self.device = device
self.input = ops.CaffeReader(path = caffe_db_folder, random_shuffle=False)
self.input_crop_pos = ops.ExternalSource()
self.input_crop_size = ops.ExternalSource()
if self.is_fused_decoder:
if axis_names:
self.decode = ops.ImageDecoderSlice(device = "cpu",
output_type = types.RGB,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
axis_names = axis_names)
elif axes:
self.decode = ops.ImageDecoderSlice(device = "cpu",
output_type = types.RGB,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
axes = axes)
else:
self.decode = ops.ImageDecoderSlice(device = "cpu",
output_type = types.RGB,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape)
else:
self.decode = ops.ImageDecoder(device = "cpu",
output_type = types.RGB)
if axis_names:
self.slice = ops.Slice(device = self.device,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
axis_names = axis_names)
elif axes:
self.slice = ops.Slice(device = self.device,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
axes = axes)
else:
self.slice = ops.Slice(device = self.device,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape)
def define_graph(self):
inputs, labels = self.input(name="Reader")
self.crop_pos = self.input_crop_pos()
self.crop_size = self.input_crop_size()
if self.is_fused_decoder:
images = self.decode(inputs, self.crop_pos, self.crop_size)
else:
images = self.decode(inputs)
if self.device == 'gpu':
images = images.gpu()
images = self.slice(images, self.crop_pos, self.crop_size)
return images
def iter_setup(self):
(crop_pos, crop_size) = self.pos_size_iter.next()
self.feed_input(self.crop_pos, crop_pos)
self.feed_input(self.crop_size, crop_size)
class SliceArgsIterator(object):
def __init__(self,
batch_size,
num_dims=3,
image_shape=None, # Needed if normalized_anchor and normalized_shape are False
image_layout=None, # Needed if axis_names is used to specify the slice
normalized_anchor=True,
normalized_shape=True,
axes=None,
axis_names=None,
min_norm_anchor=0.0,
max_norm_anchor=0.2,
min_norm_shape=0.4,
max_norm_shape=0.75,
seed=54643613):
self.batch_size = batch_size
self.num_dims = num_dims
self.image_shape = image_shape
self.image_layout = image_layout
self.normalized_anchor = normalized_anchor
self.normalized_shape = normalized_shape
self.axes = axes
self.axis_names = axis_names
self.min_norm_anchor=min_norm_anchor
self.max_norm_anchor=max_norm_anchor
self.min_norm_shape=min_norm_shape
self.max_norm_shape=max_norm_shape
self.seed=seed
if not self.axis_names and not self.axes:
self.axis_names = "WH"
if self.axis_names:
self.axes = []
for axis_name in self.axis_names:
assert axis_name in self.image_layout
self.axes.append(self.image_layout.index(axis_name))
assert(len(self.axes)>0)
def __iter__(self):
self.i = 0
self.n = self.batch_size
return self
def __next__(self):
pos = []
size = []
anchor_amplitude = self.max_norm_anchor - self.min_norm_anchor
anchor_offset = self.min_norm_anchor
shape_amplitude = self.max_norm_shape - self.min_norm_shape
shape_offset = self.min_norm_shape
np.random.seed(self.seed)
for k in range(self.batch_size):
norm_anchor = anchor_amplitude * np.random.rand(len(self.axes)) + anchor_offset
norm_shape = shape_amplitude * np.random.rand(len(self.axes)) + shape_offset
if self.normalized_anchor:
anchor = norm_anchor
else:
anchor = [floor(norm_anchor[i] * self.image_shape[self.axes[i]]) for i in range(len(self.axes))]
if self.normalized_shape:
shape = norm_shape
else:
shape = [floor(norm_shape[i] * self.image_shape[self.axes[i]]) for i in range(len(self.axes))]
pos.append(np.asarray(anchor, dtype=np.float32))
size.append(np.asarray(shape, dtype=np.float32))
self.i = (self.i + 1) % self.n
return (pos, size)
next = __next__
def slice_func_helper(axes, axis_names, layout, normalized_anchor, normalized_shape, image, slice_anchor, slice_shape):
# TODO(janton): remove this
if not axes and not axis_names:
axis_names = "WH"
if axis_names:
axes = []
for axis_name in axis_names:
assert(axis_name in layout)
axis_pos = layout.find(axis_name)
axes.append(axis_pos)
shape = image.shape
full_slice_anchor = [0] * len(shape)
full_slice_shape = list(shape)
for axis in axes:
idx = axes.index(axis)
full_slice_anchor[axis] = slice_anchor[idx]
full_slice_shape[axis] = slice_shape[idx]
# std::round has different behaviour than np.round, so manually add 0.5 and truncate to int
if normalized_anchor and normalized_shape:
start = [int(np.float32(shape[i]) * np.float32(full_slice_anchor[i]) + 0.5)
for i in range(len(shape))]
end = [int(np.float32(shape[i]) * np.float32(full_slice_anchor[i]+full_slice_shape[i]) + 0.5)
for i in range(len(shape))]
else:
if normalized_anchor:
start = [int(np.float32(shape[i]) * np.float32(full_slice_anchor[i]) + 0.5)
for i in range(len(shape))]
else:
start = [int(np.float32(full_slice_anchor[i]) + 0.5)
for i in range(len(shape))]
if normalized_shape:
end = [start[i] + int(np.float32(shape[i]) * np.float32(full_slice_shape[i]) + 0.5)
for i in range(len(shape))]
else:
end = [start[i] + int(np.float32(full_slice_shape[i]) + 0.5)
for i in range(len(shape))]
if len(full_slice_anchor) == 1:
return image[start[0]:end[0]]
elif len(full_slice_anchor) == 2:
return image[start[0]:end[0], start[1]:end[1]]
elif len(full_slice_anchor) == 3:
return image[start[0]:end[0], start[1]:end[1], start[2]:end[2]]
elif len(full_slice_anchor) == 4:
return image[start[0]:end[0], start[1]:end[1], start[2]:end[2], start[3]:end[3]]
else:
assert(False)
class SliceSynthDataPipelinePythonOp(Pipeline):
def __init__(self, batch_size, layout, iterator, pos_size_iter,
num_threads=1, device_id=0, num_gpus=1,
axes=None, axis_names=None,
normalized_anchor=True, normalized_shape=True):
super(SliceSynthDataPipelinePythonOp, self).__init__(
batch_size, num_threads, device_id,
seed=12345, exec_async=False, exec_pipelined=False)
self.device = "cpu"
self.layout = layout
self.iterator = iterator
self.pos_size_iter = pos_size_iter
self.inputs = ops.ExternalSource()
self.input_crop_pos = ops.ExternalSource()
self.input_crop_size = ops.ExternalSource()
function = partial(
slice_func_helper, axes, axis_names, self.layout,
normalized_anchor, normalized_shape)
self.slice = ops.PythonFunction(function=function)
def define_graph(self):
self.data = self.inputs()
self.crop_pos = self.input_crop_pos()
self.crop_size = self.input_crop_size()
out = self.slice(self.data, self.crop_pos, self.crop_size)
return out
def iter_setup(self):
data = self.iterator.next()
self.feed_input(self.data, data, layout=self.layout)
(crop_pos, crop_size) = self.pos_size_iter.next()
self.feed_input(self.crop_pos, crop_pos)
self.feed_input(self.crop_size, crop_size)
class SlicePythonOp(Pipeline):
def __init__(self, batch_size, pos_size_iter,
num_threads=1, device_id=0, num_gpus=1,
axes=None, axis_names=None,
normalized_anchor=True, normalized_shape=True):
super(SlicePythonOp, self).__init__(
batch_size, num_threads, device_id,
seed=12345, exec_async=False, exec_pipelined=False)
self.device = "cpu"
self.layout = "HWC"
self.pos_size_iter = pos_size_iter
self.input = ops.CaffeReader(path = caffe_db_folder, random_shuffle=False)
self.decode = ops.ImageDecoder(device = 'cpu', output_type = types.RGB)
self.input_crop_pos = ops.ExternalSource()
self.input_crop_size = ops.ExternalSource()
function = partial(
slice_func_helper, axes, axis_names, self.layout,
normalized_anchor, normalized_shape)
self.slice = ops.PythonFunction(function=function)
def define_graph(self):
imgs, _ = self.input()
imgs = self.decode(imgs)
self.crop_pos = self.input_crop_pos()
self.crop_size = self.input_crop_size()
out = self.slice(imgs, self.crop_pos, self.crop_size)
return out
def iter_setup(self):
(crop_pos, crop_size) = self.pos_size_iter.next()
self.feed_input(self.crop_pos, crop_pos)
self.feed_input(self.crop_size, crop_size)
def check_slice_synth_data_vs_numpy(device, batch_size, input_shape, layout, axes, axis_names,
normalized_anchor, normalized_shape):
eiis = [RandomDataIterator(batch_size, shape=input_shape)
for k in range(2)]
eii_args = [SliceArgsIterator(batch_size, len(input_shape), image_shape=input_shape,
image_layout=layout, axes=axes, axis_names=axis_names, normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape)
for k in range(2)]
compare_pipelines(
SliceSynthDataPipeline(device, batch_size, layout, iter(eiis[0]), iter(eii_args[0]),
axes=axes, axis_names=axis_names, normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape),
SliceSynthDataPipelinePythonOp(batch_size, layout, iter(eiis[0]), iter(eii_args[1]),
axes=axes, axis_names=axis_names, normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape),
batch_size=batch_size, N_iterations=5)
def test_slice_synth_data_vs_numpy():
for device in ["cpu", "gpu"]:
for batch_size in {1, 8}:
for input_shape, layout, axes, axis_names in \
[((200,400,3), "HWC", None, "WH"),
((200,400,3), "HWC", None, "HW"),
((200,400,3), "HWC", None, "C"),
((200,400,3), "HWC", (1,0), None),
((200,400,3), "HWC", (0,1), None),
((200,400,3), "HWC", (2,), None),
((200,), "H", (0,), None),
((200,), "H", None, "H"),
((200,400), "HW", (1,), None),
((200,400), "HW", None, "W"),
((80, 30, 20, 3), "DHWC", (2,1,0), None),
((80, 30, 20, 3), "DHWC", (0,1,2), None),
((80, 30, 20, 3), "DHWC", (2,1), None),
((80, 30, 20, 3), "DHWC", None, "WHD"),
((80, 30, 20, 3), "DHWC", None, "DHW"),
((80, 30, 20, 3), "DHWC", None, "WH"),
((80, 30, 20, 3), "DHWC", None, "C")]:
for normalized_anchor in [True, False]:
for normalized_shape in [True, False]:
yield check_slice_synth_data_vs_numpy, device, batch_size, \
input_shape, layout, axes, axis_names, normalized_anchor, normalized_shape
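# Checks that a standalone ImageDecoder followed by Slice matches the fused
# ImageDecoderSlice operator for the same slice arguments.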
def check_slice_vs_fused_decoder(device, batch_size, axes, axis_names):
eii_args = [SliceArgsIterator(batch_size, image_layout="HWC", axes=axes, axis_names=axis_names)
for k in range(2)]
compare_pipelines(
SlicePipeline(device, batch_size, iter(eii_args[0]), axes=axes, axis_names=axis_names, is_fused_decoder=False),
SlicePipeline(device, batch_size, iter(eii_args[1]), axes=axes, axis_names=axis_names, is_fused_decoder=True),
batch_size=batch_size, N_iterations=5)
def test_slice_vs_fused_decoder():
for device in ["cpu", "gpu"]:
for batch_size in {1}:
for axes, axis_names in \
[(None, "WH"), (None, "HW"),
((1,0), None), ((0,1), None)]:
yield check_slice_vs_fused_decoder, device, batch_size, axes, axis_names
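# Checks the Slice operator against the NumPy-based SlicePythonOp baseline on
# images decoded from the Caffe LMDB test data.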
def check_slice_vs_numpy(device, batch_size, axes, axis_names):
eii_args = [SliceArgsIterator(batch_size, image_layout="HWC", axes=axes, axis_names=axis_names)
for k in range(2)]
compare_pipelines(
SlicePipeline(device, batch_size, iter(eii_args[0]), axes=axes, axis_names=axis_names),
SlicePythonOp(batch_size, iter(eii_args[1]), axes=axes, axis_names=axis_names),
batch_size=batch_size, N_iterations=5)
def test_slice_vs_numpy():
for device in ["cpu", "gpu"]:
for batch_size in {1}:
for axes, axis_names in \
[(None, "WH"), (None, "HW"),
((1,0), None), ((0,1), None)]:
yield check_slice_vs_numpy, device, batch_size, axes, axis_names
| 44.135392
| 119
| 0.592379
|
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import nvidia.dali as dali
from nvidia.dali.backend_impl import TensorListGPU
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
import os
from functools import partial
from test_utils import check_batch
from test_utils import compare_pipelines
from test_utils import get_dali_extra_path
from test_utils import RandomDataIterator
from math import floor
test_data_root = get_dali_extra_path()
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
test_data_video = os.path.join(test_data_root, 'db', 'optical_flow', 'sintel_trailer')
class SliceSynthDataPipeline(Pipeline):
def __init__(self, device, batch_size, layout, iterator, pos_size_iter,
num_threads=1, device_id=0, num_gpus=1,
axes=None, axis_names=None, normalized_anchor=True, normalized_shape=True):
super(SliceSynthDataPipeline, self).__init__(
batch_size, num_threads, device_id, seed=1234)
self.device = device
self.layout = layout
self.iterator = iterator
self.pos_size_iter = pos_size_iter
self.inputs = ops.ExternalSource()
self.input_crop_pos = ops.ExternalSource()
self.input_crop_size = ops.ExternalSource()
if axis_names:
self.slice = ops.Slice(device = self.device,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
axis_names = axis_names)
elif axes:
self.slice = ops.Slice(device = self.device,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
axes = axes)
else:
self.slice = ops.Slice(device = self.device,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
)
def define_graph(self):
self.data = self.inputs()
self.crop_pos = self.input_crop_pos()
self.crop_size = self.input_crop_size()
data = self.data.gpu() if self.device == 'gpu' else self.data
out = self.slice(data, self.crop_pos, self.crop_size)
return out
def iter_setup(self):
data = self.iterator.next()
self.feed_input(self.data, data, layout=self.layout)
(crop_pos, crop_size) = self.pos_size_iter.next()
self.feed_input(self.crop_pos, crop_pos)
self.feed_input(self.crop_size, crop_size)
class SlicePipeline(Pipeline):
def __init__(self, device, batch_size, pos_size_iter,
num_threads=1, device_id=0, is_fused_decoder=False,
axes=None, axis_names=None, normalized_anchor=True, normalized_shape=True):
super(SlicePipeline, self).__init__(
batch_size, num_threads, device_id, seed=1234)
self.is_fused_decoder = is_fused_decoder
self.pos_size_iter = pos_size_iter
self.device = device
self.input = ops.CaffeReader(path = caffe_db_folder, random_shuffle=False)
self.input_crop_pos = ops.ExternalSource()
self.input_crop_size = ops.ExternalSource()
if self.is_fused_decoder:
if axis_names:
self.decode = ops.ImageDecoderSlice(device = "cpu",
output_type = types.RGB,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
axis_names = axis_names)
elif axes:
self.decode = ops.ImageDecoderSlice(device = "cpu",
output_type = types.RGB,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
axes = axes)
else:
self.decode = ops.ImageDecoderSlice(device = "cpu",
output_type = types.RGB,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape)
else:
self.decode = ops.ImageDecoder(device = "cpu",
output_type = types.RGB)
if axis_names:
self.slice = ops.Slice(device = self.device,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
axis_names = axis_names)
elif axes:
self.slice = ops.Slice(device = self.device,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
axes = axes)
else:
self.slice = ops.Slice(device = self.device,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape)
def define_graph(self):
inputs, labels = self.input(name="Reader")
self.crop_pos = self.input_crop_pos()
self.crop_size = self.input_crop_size()
if self.is_fused_decoder:
images = self.decode(inputs, self.crop_pos, self.crop_size)
else:
images = self.decode(inputs)
if self.device == 'gpu':
images = images.gpu()
images = self.slice(images, self.crop_pos, self.crop_size)
return images
def iter_setup(self):
(crop_pos, crop_size) = self.pos_size_iter.next()
self.feed_input(self.crop_pos, crop_pos)
self.feed_input(self.crop_size, crop_size)
class SliceArgsIterator(object):
def __init__(self,
batch_size,
num_dims=3,
image_shape=None,
image_layout=None,
normalized_anchor=True,
normalized_shape=True,
axes=None,
axis_names=None,
min_norm_anchor=0.0,
max_norm_anchor=0.2,
min_norm_shape=0.4,
max_norm_shape=0.75,
seed=54643613):
self.batch_size = batch_size
self.num_dims = num_dims
self.image_shape = image_shape
self.image_layout = image_layout
self.normalized_anchor = normalized_anchor
self.normalized_shape = normalized_shape
self.axes = axes
self.axis_names = axis_names
self.min_norm_anchor=min_norm_anchor
self.max_norm_anchor=max_norm_anchor
self.min_norm_shape=min_norm_shape
self.max_norm_shape=max_norm_shape
self.seed=seed
if not self.axis_names and not self.axes:
self.axis_names = "WH"
if self.axis_names:
self.axes = []
for axis_name in self.axis_names:
assert axis_name in self.image_layout
self.axes.append(self.image_layout.index(axis_name))
assert(len(self.axes)>0)
def __iter__(self):
self.i = 0
self.n = self.batch_size
return self
def __next__(self):
pos = []
size = []
anchor_amplitude = self.max_norm_anchor - self.min_norm_anchor
anchor_offset = self.min_norm_anchor
shape_amplitude = self.max_norm_shape - self.min_norm_shape
shape_offset = self.min_norm_shape
np.random.seed(self.seed)
for k in range(self.batch_size):
norm_anchor = anchor_amplitude * np.random.rand(len(self.axes)) + anchor_offset
norm_shape = shape_amplitude * np.random.rand(len(self.axes)) + shape_offset
if self.normalized_anchor:
anchor = norm_anchor
else:
anchor = [floor(norm_anchor[i] * self.image_shape[self.axes[i]]) for i in range(len(self.axes))]
if self.normalized_shape:
shape = norm_shape
else:
shape = [floor(norm_shape[i] * self.image_shape[self.axes[i]]) for i in range(len(self.axes))]
pos.append(np.asarray(anchor, dtype=np.float32))
size.append(np.asarray(shape, dtype=np.float32))
self.i = (self.i + 1) % self.n
return (pos, size)
next = __next__
def slice_func_helper(axes, axis_names, layout, normalized_anchor, normalized_shape, image, slice_anchor, slice_shape):
if not axes and not axis_names:
axis_names = "WH"
if axis_names:
axes = []
for axis_name in axis_names:
assert(axis_name in layout)
axis_pos = layout.find(axis_name)
axes.append(axis_pos)
shape = image.shape
full_slice_anchor = [0] * len(shape)
full_slice_shape = list(shape)
for axis in axes:
idx = axes.index(axis)
full_slice_anchor[axis] = slice_anchor[idx]
full_slice_shape[axis] = slice_shape[idx]
if normalized_anchor and normalized_shape:
start = [int(np.float32(shape[i]) * np.float32(full_slice_anchor[i]) + 0.5)
for i in range(len(shape))]
end = [int(np.float32(shape[i]) * np.float32(full_slice_anchor[i]+full_slice_shape[i]) + 0.5)
for i in range(len(shape))]
else:
if normalized_anchor:
start = [int(np.float32(shape[i]) * np.float32(full_slice_anchor[i]) + 0.5)
for i in range(len(shape))]
else:
start = [int(np.float32(full_slice_anchor[i]) + 0.5)
for i in range(len(shape))]
if normalized_shape:
end = [start[i] + int(np.float32(shape[i]) * np.float32(full_slice_shape[i]) + 0.5)
for i in range(len(shape))]
else:
end = [start[i] + int(np.float32(full_slice_shape[i]) + 0.5)
for i in range(len(shape))]
if len(full_slice_anchor) == 1:
return image[start[0]:end[0]]
elif len(full_slice_anchor) == 2:
return image[start[0]:end[0], start[1]:end[1]]
elif len(full_slice_anchor) == 3:
return image[start[0]:end[0], start[1]:end[1], start[2]:end[2]]
elif len(full_slice_anchor) == 4:
return image[start[0]:end[0], start[1]:end[1], start[2]:end[2], start[3]:end[3]]
else:
        assert False, "Unsupported number of slice dimensions"
class SliceSynthDataPipelinePythonOp(Pipeline):
def __init__(self, batch_size, layout, iterator, pos_size_iter,
num_threads=1, device_id=0, num_gpus=1,
axes=None, axis_names=None,
normalized_anchor=True, normalized_shape=True):
super(SliceSynthDataPipelinePythonOp, self).__init__(
batch_size, num_threads, device_id,
seed=12345, exec_async=False, exec_pipelined=False)
self.device = "cpu"
self.layout = layout
self.iterator = iterator
self.pos_size_iter = pos_size_iter
self.inputs = ops.ExternalSource()
self.input_crop_pos = ops.ExternalSource()
self.input_crop_size = ops.ExternalSource()
function = partial(
slice_func_helper, axes, axis_names, self.layout,
normalized_anchor, normalized_shape)
self.slice = ops.PythonFunction(function=function)
def define_graph(self):
self.data = self.inputs()
self.crop_pos = self.input_crop_pos()
self.crop_size = self.input_crop_size()
out = self.slice(self.data, self.crop_pos, self.crop_size)
return out
def iter_setup(self):
data = self.iterator.next()
self.feed_input(self.data, data, layout=self.layout)
(crop_pos, crop_size) = self.pos_size_iter.next()
self.feed_input(self.crop_pos, crop_pos)
self.feed_input(self.crop_size, crop_size)
class SlicePythonOp(Pipeline):
def __init__(self, batch_size, pos_size_iter,
num_threads=1, device_id=0, num_gpus=1,
axes=None, axis_names=None,
normalized_anchor=True, normalized_shape=True):
super(SlicePythonOp, self).__init__(
batch_size, num_threads, device_id,
seed=12345, exec_async=False, exec_pipelined=False)
self.device = "cpu"
self.layout = "HWC"
self.pos_size_iter = pos_size_iter
self.input = ops.CaffeReader(path = caffe_db_folder, random_shuffle=False)
self.decode = ops.ImageDecoder(device = 'cpu', output_type = types.RGB)
self.input_crop_pos = ops.ExternalSource()
self.input_crop_size = ops.ExternalSource()
function = partial(
slice_func_helper, axes, axis_names, self.layout,
normalized_anchor, normalized_shape)
self.slice = ops.PythonFunction(function=function)
def define_graph(self):
imgs, _ = self.input()
imgs = self.decode(imgs)
self.crop_pos = self.input_crop_pos()
self.crop_size = self.input_crop_size()
out = self.slice(imgs, self.crop_pos, self.crop_size)
return out
def iter_setup(self):
(crop_pos, crop_size) = self.pos_size_iter.next()
self.feed_input(self.crop_pos, crop_pos)
self.feed_input(self.crop_size, crop_size)
def check_slice_synth_data_vs_numpy(device, batch_size, input_shape, layout, axes, axis_names,
normalized_anchor, normalized_shape):
eiis = [RandomDataIterator(batch_size, shape=input_shape)
for k in range(2)]
eii_args = [SliceArgsIterator(batch_size, len(input_shape), image_shape=input_shape,
image_layout=layout, axes=axes, axis_names=axis_names, normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape)
for k in range(2)]
compare_pipelines(
SliceSynthDataPipeline(device, batch_size, layout, iter(eiis[0]), iter(eii_args[0]),
axes=axes, axis_names=axis_names, normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape),
SliceSynthDataPipelinePythonOp(batch_size, layout, iter(eiis[0]), iter(eii_args[1]),
axes=axes, axis_names=axis_names, normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape),
batch_size=batch_size, N_iterations=5)
def test_slice_synth_data_vs_numpy():
for device in ["cpu", "gpu"]:
for batch_size in {1, 8}:
for input_shape, layout, axes, axis_names in \
[((200,400,3), "HWC", None, "WH"),
((200,400,3), "HWC", None, "HW"),
((200,400,3), "HWC", None, "C"),
((200,400,3), "HWC", (1,0), None),
((200,400,3), "HWC", (0,1), None),
((200,400,3), "HWC", (2,), None),
((200,), "H", (0,), None),
((200,), "H", None, "H"),
((200,400), "HW", (1,), None),
((200,400), "HW", None, "W"),
((80, 30, 20, 3), "DHWC", (2,1,0), None),
((80, 30, 20, 3), "DHWC", (0,1,2), None),
((80, 30, 20, 3), "DHWC", (2,1), None),
((80, 30, 20, 3), "DHWC", None, "WHD"),
((80, 30, 20, 3), "DHWC", None, "DHW"),
((80, 30, 20, 3), "DHWC", None, "WH"),
((80, 30, 20, 3), "DHWC", None, "C")]:
for normalized_anchor in [True, False]:
for normalized_shape in [True, False]:
yield check_slice_synth_data_vs_numpy, device, batch_size, \
input_shape, layout, axes, axis_names, normalized_anchor, normalized_shape
def check_slice_vs_fused_decoder(device, batch_size, axes, axis_names):
eii_args = [SliceArgsIterator(batch_size, image_layout="HWC", axes=axes, axis_names=axis_names)
for k in range(2)]
compare_pipelines(
SlicePipeline(device, batch_size, iter(eii_args[0]), axes=axes, axis_names=axis_names, is_fused_decoder=False),
SlicePipeline(device, batch_size, iter(eii_args[1]), axes=axes, axis_names=axis_names, is_fused_decoder=True),
batch_size=batch_size, N_iterations=5)
def test_slice_vs_fused_decoder():
for device in ["cpu", "gpu"]:
for batch_size in {1}:
for axes, axis_names in \
[(None, "WH"), (None, "HW"),
((1,0), None), ((0,1), None)]:
yield check_slice_vs_fused_decoder, device, batch_size, axes, axis_names
def check_slice_vs_numpy(device, batch_size, axes, axis_names):
eii_args = [SliceArgsIterator(batch_size, image_layout="HWC", axes=axes, axis_names=axis_names)
for k in range(2)]
compare_pipelines(
SlicePipeline(device, batch_size, iter(eii_args[0]), axes=axes, axis_names=axis_names),
SlicePythonOp(batch_size, iter(eii_args[1]), axes=axes, axis_names=axis_names),
batch_size=batch_size, N_iterations=5)
def test_slice_vs_numpy():
for device in ["cpu", "gpu"]:
for batch_size in {1}:
for axes, axis_names in \
[(None, "WH"), (None, "HW"),
((1,0), None), ((0,1), None)]:
yield check_slice_vs_numpy, device, batch_size, axes, axis_names
| true
| true
|
1c4618feed0faaaedbc546d3b6511a52116feb26
| 318
|
py
|
Python
|
Lib/site-packages/django_makemessages_xgettext/management/commands/makemessagesxgettext.py
|
MortazaviM/Hackim
|
28bf9897d1793176711d1c91f5b7ac57bf4b8a36
|
[
"bzip2-1.0.6"
] | 2
|
2016-11-16T19:16:51.000Z
|
2018-02-23T02:52:35.000Z
|
django_makemessages_xgettext/management/commands/makemessagesxgettext.py
|
resulto/django-makemessages-xgettext
|
6af1590ec4dc2ffd6670e026d098cb0baa415d54
|
[
"BSD-3-Clause"
] | null | null | null |
django_makemessages_xgettext/management/commands/makemessagesxgettext.py
|
resulto/django-makemessages-xgettext
|
6af1590ec4dc2ffd6670e026d098cb0baa415d54
|
[
"BSD-3-Clause"
] | null | null | null |
import django
if django.get_version().startswith("1.7"):
from django_makemessages_xgettext import django17_makemessagesxgettext
Command = django17_makemessagesxgettext.Command
else:
from django_makemessages_xgettext import django18_makemessagesxgettext
Command = django18_makemessagesxgettext.Command
| 35.333333
| 74
| 0.839623
|
import django
if django.get_version().startswith("1.7"):
from django_makemessages_xgettext import django17_makemessagesxgettext
Command = django17_makemessagesxgettext.Command
else:
from django_makemessages_xgettext import django18_makemessagesxgettext
Command = django18_makemessagesxgettext.Command
| true
| true
|
1c4619c76a66576b7e0d2dd8529056fbf1cb9d05
| 67,648
|
py
|
Python
|
dulwich/tests/test_porcelain.py
|
stmcginnis/dulwich
|
c33607e8d76643c6ec44b3010b138d2039c9acec
|
[
"Apache-2.0"
] | 1
|
2020-08-08T21:55:08.000Z
|
2020-08-08T21:55:08.000Z
|
dulwich/tests/test_porcelain.py
|
stmcginnis/dulwich
|
c33607e8d76643c6ec44b3010b138d2039c9acec
|
[
"Apache-2.0"
] | null | null | null |
dulwich/tests/test_porcelain.py
|
stmcginnis/dulwich
|
c33607e8d76643c6ec44b3010b138d2039c9acec
|
[
"Apache-2.0"
] | null | null | null |
# test_porcelain.py -- porcelain tests
# Copyright (C) 2013 Jelmer Vernooij <jelmer@jelmer.uk>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Tests for dulwich.porcelain."""
from io import BytesIO
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import errno
import os
import shutil
import tarfile
import tempfile
import time
from dulwich import porcelain
from dulwich.diff_tree import tree_changes
from dulwich.objects import (
Blob,
Tag,
Tree,
ZERO_SHA,
)
from dulwich.repo import (
NoIndexPresent,
Repo,
)
from dulwich.tests import (
TestCase,
)
from dulwich.tests.utils import (
build_commit_graph,
make_commit,
make_object,
)
def flat_walk_dir(dir_to_walk):
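    """Walk dir_to_walk, yielding the paths of all files and subdirectories relative to it."""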
for dirpath, _, filenames in os.walk(dir_to_walk):
rel_dirpath = os.path.relpath(dirpath, dir_to_walk)
if not dirpath == dir_to_walk:
yield rel_dirpath
for filename in filenames:
if dirpath == dir_to_walk:
yield filename
else:
yield os.path.join(rel_dirpath, filename)
class PorcelainTestCase(TestCase):
def setUp(self):
super(PorcelainTestCase, self).setUp()
self.test_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.test_dir)
self.repo_path = os.path.join(self.test_dir, 'repo')
self.repo = Repo.init(self.repo_path, mkdir=True)
self.addCleanup(self.repo.close)
class ArchiveTests(PorcelainTestCase):
"""Tests for the archive command."""
def test_simple(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"refs/heads/master"] = c3.id
out = BytesIO()
err = BytesIO()
porcelain.archive(self.repo.path, b"refs/heads/master", outstream=out,
errstream=err)
self.assertEqual(b"", err.getvalue())
tf = tarfile.TarFile(fileobj=out)
self.addCleanup(tf.close)
self.assertEqual([], tf.getnames())
class UpdateServerInfoTests(PorcelainTestCase):
def test_simple(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"refs/heads/foo"] = c3.id
porcelain.update_server_info(self.repo.path)
self.assertTrue(os.path.exists(
os.path.join(self.repo.controldir(), 'info', 'refs')))
class CommitTests(PorcelainTestCase):
def test_custom_author(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"refs/heads/foo"] = c3.id
sha = porcelain.commit(
self.repo.path, message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>")
self.assertTrue(isinstance(sha, bytes))
self.assertEqual(len(sha), 40)
def test_unicode(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"refs/heads/foo"] = c3.id
sha = porcelain.commit(
self.repo.path, message="Some message",
author="Joe <joe@example.com>",
committer="Bob <bob@example.com>")
self.assertTrue(isinstance(sha, bytes))
self.assertEqual(len(sha), 40)
class CleanTests(PorcelainTestCase):
def put_files(self, tracked, ignored, untracked, empty_dirs):
"""Put the described files in the wd
"""
all_files = tracked | ignored | untracked
for file_path in all_files:
abs_path = os.path.join(self.repo.path, file_path)
# File may need to be written in a dir that doesn't exist yet, so
# create the parent dir(s) as necessary
parent_dir = os.path.dirname(abs_path)
try:
os.makedirs(parent_dir)
except OSError as err:
if not err.errno == errno.EEXIST:
raise err
with open(abs_path, 'w') as f:
f.write('')
with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
f.writelines(ignored)
        for dir_path in empty_dirs:
            os.mkdir(os.path.join(self.repo.path, dir_path))
files_to_add = [os.path.join(self.repo.path, t) for t in tracked]
porcelain.add(repo=self.repo.path, paths=files_to_add)
porcelain.commit(repo=self.repo.path, message="init commit")
def assert_wd(self, expected_paths):
"""Assert paths of files and dirs in wd are same as expected_paths
"""
control_dir_rel = os.path.relpath(
self.repo._controldir, self.repo.path)
# normalize paths to simplify comparison across platforms
found_paths = {
os.path.normpath(p)
for p in flat_walk_dir(self.repo.path)
if not p.split(os.sep)[0] == control_dir_rel}
norm_expected_paths = {os.path.normpath(p) for p in expected_paths}
self.assertEqual(found_paths, norm_expected_paths)
def test_from_root(self):
self.put_files(
tracked={
'tracked_file',
'tracked_dir/tracked_file',
'.gitignore'},
ignored={
'ignored_file'},
untracked={
'untracked_file',
'tracked_dir/untracked_dir/untracked_file',
'untracked_dir/untracked_dir/untracked_file'},
empty_dirs={
'empty_dir'})
porcelain.clean(repo=self.repo.path, target_dir=self.repo.path)
self.assert_wd({
'tracked_file',
'tracked_dir/tracked_file',
'.gitignore',
'ignored_file',
'tracked_dir'})
def test_from_subdir(self):
self.put_files(
tracked={
'tracked_file',
'tracked_dir/tracked_file',
'.gitignore'},
ignored={
'ignored_file'},
untracked={
'untracked_file',
'tracked_dir/untracked_dir/untracked_file',
'untracked_dir/untracked_dir/untracked_file'},
empty_dirs={
'empty_dir'})
porcelain.clean(
repo=self.repo,
target_dir=os.path.join(self.repo.path, 'untracked_dir'))
self.assert_wd({
'tracked_file',
'tracked_dir/tracked_file',
'.gitignore',
'ignored_file',
'untracked_file',
'tracked_dir/untracked_dir/untracked_file',
'empty_dir',
'untracked_dir',
'tracked_dir',
'tracked_dir/untracked_dir'})
class CloneTests(PorcelainTestCase):
def test_simple_local(self):
f1_1 = make_object(Blob, data=b'f1')
commit_spec = [[1], [2, 1], [3, 1, 2]]
trees = {1: [(b'f1', f1_1), (b'f2', f1_1)],
2: [(b'f1', f1_1), (b'f2', f1_1)],
3: [(b'f1', f1_1), (b'f2', f1_1)], }
c1, c2, c3 = build_commit_graph(self.repo.object_store,
commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c3.id
self.repo.refs[b"refs/tags/foo"] = c3.id
target_path = tempfile.mkdtemp()
errstream = BytesIO()
self.addCleanup(shutil.rmtree, target_path)
r = porcelain.clone(self.repo.path, target_path,
checkout=False, errstream=errstream)
self.addCleanup(r.close)
self.assertEqual(r.path, target_path)
target_repo = Repo(target_path)
self.assertEqual(0, len(target_repo.open_index()))
self.assertEqual(c3.id, target_repo.refs[b'refs/tags/foo'])
self.assertTrue(b'f1' not in os.listdir(target_path))
self.assertTrue(b'f2' not in os.listdir(target_path))
c = r.get_config()
encoded_path = self.repo.path
if not isinstance(encoded_path, bytes):
encoded_path = encoded_path.encode('utf-8')
self.assertEqual(encoded_path, c.get((b'remote', b'origin'), b'url'))
self.assertEqual(
b'+refs/heads/*:refs/remotes/origin/*',
c.get((b'remote', b'origin'), b'fetch'))
def test_simple_local_with_checkout(self):
f1_1 = make_object(Blob, data=b'f1')
commit_spec = [[1], [2, 1], [3, 1, 2]]
trees = {1: [(b'f1', f1_1), (b'f2', f1_1)],
2: [(b'f1', f1_1), (b'f2', f1_1)],
3: [(b'f1', f1_1), (b'f2', f1_1)], }
c1, c2, c3 = build_commit_graph(self.repo.object_store,
commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c3.id
target_path = tempfile.mkdtemp()
errstream = BytesIO()
self.addCleanup(shutil.rmtree, target_path)
with porcelain.clone(self.repo.path, target_path,
checkout=True,
errstream=errstream) as r:
self.assertEqual(r.path, target_path)
with Repo(target_path) as r:
self.assertEqual(r.head(), c3.id)
self.assertTrue('f1' in os.listdir(target_path))
self.assertTrue('f2' in os.listdir(target_path))
def test_bare_local_with_checkout(self):
f1_1 = make_object(Blob, data=b'f1')
commit_spec = [[1], [2, 1], [3, 1, 2]]
trees = {1: [(b'f1', f1_1), (b'f2', f1_1)],
2: [(b'f1', f1_1), (b'f2', f1_1)],
3: [(b'f1', f1_1), (b'f2', f1_1)], }
c1, c2, c3 = build_commit_graph(self.repo.object_store,
commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c3.id
target_path = tempfile.mkdtemp()
errstream = BytesIO()
self.addCleanup(shutil.rmtree, target_path)
with porcelain.clone(
self.repo.path, target_path, bare=True,
errstream=errstream) as r:
self.assertEqual(r.path, target_path)
with Repo(target_path) as r:
r.head()
self.assertRaises(NoIndexPresent, r.open_index)
self.assertFalse(b'f1' in os.listdir(target_path))
self.assertFalse(b'f2' in os.listdir(target_path))
def test_no_checkout_with_bare(self):
f1_1 = make_object(Blob, data=b'f1')
commit_spec = [[1]]
trees = {1: [(b'f1', f1_1), (b'f2', f1_1)]}
(c1, ) = build_commit_graph(self.repo.object_store, commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c1.id
self.repo.refs[b"HEAD"] = c1.id
target_path = tempfile.mkdtemp()
errstream = BytesIO()
self.addCleanup(shutil.rmtree, target_path)
self.assertRaises(
ValueError, porcelain.clone, self.repo.path,
target_path, checkout=True, bare=True, errstream=errstream)
def test_no_head_no_checkout(self):
f1_1 = make_object(Blob, data=b'f1')
commit_spec = [[1]]
trees = {1: [(b'f1', f1_1), (b'f2', f1_1)]}
(c1, ) = build_commit_graph(self.repo.object_store, commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c1.id
target_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, target_path)
errstream = BytesIO()
r = porcelain.clone(
self.repo.path, target_path, checkout=True, errstream=errstream)
r.close()
def test_no_head_no_checkout_outstream_errstream_autofallback(self):
f1_1 = make_object(Blob, data=b'f1')
commit_spec = [[1]]
trees = {1: [(b'f1', f1_1), (b'f2', f1_1)]}
(c1, ) = build_commit_graph(self.repo.object_store, commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c1.id
target_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, target_path)
errstream = porcelain.NoneStream()
r = porcelain.clone(
self.repo.path, target_path, checkout=True, errstream=errstream)
r.close()
class InitTests(TestCase):
def test_non_bare(self):
repo_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, repo_dir)
porcelain.init(repo_dir)
def test_bare(self):
repo_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, repo_dir)
porcelain.init(repo_dir, bare=True)
class AddTests(PorcelainTestCase):
def test_add_default_paths(self):
# create a file for initial commit
fullpath = os.path.join(self.repo.path, 'blah')
with open(fullpath, 'w') as f:
f.write("\n")
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(repo=self.repo.path, message=b'test',
author=b'test <email>', committer=b'test <email>')
# Add a second test file and a file in a directory
with open(os.path.join(self.repo.path, 'foo'), 'w') as f:
f.write("\n")
os.mkdir(os.path.join(self.repo.path, 'adir'))
with open(os.path.join(self.repo.path, 'adir', 'afile'), 'w') as f:
f.write("\n")
cwd = os.getcwd()
try:
os.chdir(self.repo.path)
porcelain.add(self.repo.path)
finally:
os.chdir(cwd)
# Check that foo was added and nothing in .git was modified
index = self.repo.open_index()
self.assertEqual(sorted(index), [b'adir/afile', b'blah', b'foo'])
def test_add_default_paths_subdir(self):
os.mkdir(os.path.join(self.repo.path, 'foo'))
with open(os.path.join(self.repo.path, 'blah'), 'w') as f:
f.write("\n")
with open(os.path.join(self.repo.path, 'foo', 'blie'), 'w') as f:
f.write("\n")
cwd = os.getcwd()
try:
os.chdir(os.path.join(self.repo.path, 'foo'))
porcelain.add(repo=self.repo.path)
porcelain.commit(repo=self.repo.path, message=b'test',
author=b'test <email>',
committer=b'test <email>')
finally:
os.chdir(cwd)
index = self.repo.open_index()
self.assertEqual(sorted(index), [b'foo/blie'])
def test_add_file(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
porcelain.add(self.repo.path, paths=[fullpath])
self.assertIn(b"foo", self.repo.open_index())
def test_add_ignored(self):
with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
f.write("foo")
with open(os.path.join(self.repo.path, 'foo'), 'w') as f:
f.write("BAR")
with open(os.path.join(self.repo.path, 'bar'), 'w') as f:
f.write("BAR")
(added, ignored) = porcelain.add(self.repo.path, paths=[
os.path.join(self.repo.path, "foo"),
os.path.join(self.repo.path, "bar")])
self.assertIn(b"bar", self.repo.open_index())
self.assertEqual(set(['bar']), set(added))
self.assertEqual(set(['foo']), ignored)
def test_add_file_absolute_path(self):
        # Absolute paths are supported, as long as they are inside the repository
with open(os.path.join(self.repo.path, 'foo'), 'w') as f:
f.write("BAR")
porcelain.add(self.repo, paths=[os.path.join(self.repo.path, "foo")])
self.assertIn(b"foo", self.repo.open_index())
def test_add_not_in_repo(self):
with open(os.path.join(self.test_dir, 'foo'), 'w') as f:
f.write("BAR")
self.assertRaises(
ValueError,
porcelain.add, self.repo,
paths=[os.path.join(self.test_dir, "foo")])
self.assertRaises(
ValueError,
porcelain.add, self.repo,
paths=["../foo"])
self.assertEqual([], list(self.repo.open_index()))
    def test_add_file_crlf_conversion(self):
        # Set the required configuration on the repo
c = self.repo.get_config()
c.set("core", "autocrlf", "input")
c.write_to_path()
# Add a file with CRLF line-ending
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'wb') as f:
f.write(b"line1\r\nline2")
porcelain.add(self.repo.path, paths=[fullpath])
# The line-endings should have been converted to LF
index = self.repo.open_index()
self.assertIn(b"foo", index)
entry = index[b"foo"]
blob = self.repo[entry.sha]
self.assertEqual(blob.data, b"line1\nline2")
class RemoveTests(PorcelainTestCase):
def test_remove_file(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
porcelain.add(self.repo.path, paths=[fullpath])
porcelain.commit(repo=self.repo, message=b'test',
author=b'test <email>',
committer=b'test <email>')
self.assertTrue(os.path.exists(os.path.join(self.repo.path, 'foo')))
cwd = os.getcwd()
try:
os.chdir(self.repo.path)
porcelain.remove(self.repo.path, paths=["foo"])
finally:
os.chdir(cwd)
self.assertFalse(os.path.exists(os.path.join(self.repo.path, 'foo')))
def test_remove_file_staged(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
cwd = os.getcwd()
try:
os.chdir(self.repo.path)
porcelain.add(self.repo.path, paths=[fullpath])
self.assertRaises(Exception, porcelain.rm, self.repo.path,
paths=["foo"])
finally:
os.chdir(cwd)
class LogTests(PorcelainTestCase):
def test_simple(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
outstream = StringIO()
porcelain.log(self.repo.path, outstream=outstream)
self.assertEqual(3, outstream.getvalue().count("-" * 50))
def test_max_entries(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
outstream = StringIO()
porcelain.log(self.repo.path, outstream=outstream, max_entries=1)
self.assertEqual(1, outstream.getvalue().count("-" * 50))
class ShowTests(PorcelainTestCase):
def test_nolist(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
outstream = StringIO()
porcelain.show(self.repo.path, objects=c3.id, outstream=outstream)
self.assertTrue(outstream.getvalue().startswith("-" * 50))
def test_simple(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
outstream = StringIO()
porcelain.show(self.repo.path, objects=[c3.id], outstream=outstream)
self.assertTrue(outstream.getvalue().startswith("-" * 50))
def test_blob(self):
b = Blob.from_string(b"The Foo\n")
self.repo.object_store.add_object(b)
outstream = StringIO()
porcelain.show(self.repo.path, objects=[b.id], outstream=outstream)
self.assertEqual(outstream.getvalue(), "The Foo\n")
def test_commit_no_parent(self):
a = Blob.from_string(b"The Foo\n")
ta = Tree()
ta.add(b"somename", 0o100644, a.id)
ca = make_commit(tree=ta.id)
self.repo.object_store.add_objects([(a, None), (ta, None), (ca, None)])
outstream = StringIO()
porcelain.show(self.repo.path, objects=[ca.id], outstream=outstream)
self.assertMultiLineEqual(outstream.getvalue(), """\
--------------------------------------------------
commit: 344da06c1bb85901270b3e8875c988a027ec087d
Author: Test Author <test@nodomain.com>
Committer: Test Committer <test@nodomain.com>
Date: Fri Jan 01 2010 00:00:00 +0000
Test message.
diff --git a/somename b/somename
new file mode 100644
index 0000000..ea5c7bf
--- /dev/null
+++ b/somename
@@ -0,0 +1 @@
+The Foo
""")
def test_tag(self):
a = Blob.from_string(b"The Foo\n")
ta = Tree()
ta.add(b"somename", 0o100644, a.id)
ca = make_commit(tree=ta.id)
self.repo.object_store.add_objects([(a, None), (ta, None), (ca, None)])
porcelain.tag_create(
self.repo.path, b"tryme", b'foo <foo@bar.com>', b'bar',
annotated=True, objectish=ca.id, tag_time=1552854211,
tag_timezone=0)
outstream = StringIO()
porcelain.show(self.repo, objects=[b'refs/tags/tryme'],
outstream=outstream)
self.maxDiff = None
self.assertMultiLineEqual(outstream.getvalue(), """\
Tagger: foo <foo@bar.com>
Date: Sun Mar 17 2019 20:23:31 +0000
bar
--------------------------------------------------
commit: 344da06c1bb85901270b3e8875c988a027ec087d
Author: Test Author <test@nodomain.com>
Committer: Test Committer <test@nodomain.com>
Date: Fri Jan 01 2010 00:00:00 +0000
Test message.
diff --git a/somename b/somename
new file mode 100644
index 0000000..ea5c7bf
--- /dev/null
+++ b/somename
@@ -0,0 +1 @@
+The Foo
""")
def test_commit_with_change(self):
a = Blob.from_string(b"The Foo\n")
ta = Tree()
ta.add(b"somename", 0o100644, a.id)
ca = make_commit(tree=ta.id)
b = Blob.from_string(b"The Bar\n")
tb = Tree()
tb.add(b"somename", 0o100644, b.id)
cb = make_commit(tree=tb.id, parents=[ca.id])
self.repo.object_store.add_objects(
[(a, None), (b, None), (ta, None), (tb, None),
(ca, None), (cb, None)])
outstream = StringIO()
porcelain.show(self.repo.path, objects=[cb.id], outstream=outstream)
self.assertMultiLineEqual(outstream.getvalue(), """\
--------------------------------------------------
commit: 2c6b6c9cb72c130956657e1fdae58e5b103744fa
Author: Test Author <test@nodomain.com>
Committer: Test Committer <test@nodomain.com>
Date: Fri Jan 01 2010 00:00:00 +0000
Test message.
diff --git a/somename b/somename
index ea5c7bf..fd38bcb 100644
--- a/somename
+++ b/somename
@@ -1 +1 @@
-The Foo
+The Bar
""")
class SymbolicRefTests(PorcelainTestCase):
def test_set_wrong_symbolic_ref(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
self.assertRaises(ValueError, porcelain.symbolic_ref, self.repo.path,
b'foobar')
def test_set_force_wrong_symbolic_ref(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
porcelain.symbolic_ref(self.repo.path, b'force_foobar', force=True)
# test if we actually changed the file
with self.repo.get_named_file('HEAD') as f:
new_ref = f.read()
self.assertEqual(new_ref, b'ref: refs/heads/force_foobar\n')
def test_set_symbolic_ref(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
porcelain.symbolic_ref(self.repo.path, b'master')
def test_set_symbolic_ref_other_than_master(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]],
attrs=dict(refs='develop'))
self.repo.refs[b"HEAD"] = c3.id
self.repo.refs[b"refs/heads/develop"] = c3.id
porcelain.symbolic_ref(self.repo.path, b'develop')
# test if we actually changed the file
with self.repo.get_named_file('HEAD') as f:
new_ref = f.read()
self.assertEqual(new_ref, b'ref: refs/heads/develop\n')
class DiffTreeTests(PorcelainTestCase):
def test_empty(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
outstream = BytesIO()
porcelain.diff_tree(self.repo.path, c2.tree, c3.tree,
outstream=outstream)
self.assertEqual(outstream.getvalue(), b"")
class CommitTreeTests(PorcelainTestCase):
def test_simple(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
b = Blob()
b.data = b"foo the bar"
t = Tree()
t.add(b"somename", 0o100644, b.id)
self.repo.object_store.add_object(t)
self.repo.object_store.add_object(b)
sha = porcelain.commit_tree(
self.repo.path, t.id, message=b"Withcommit.",
author=b"Joe <joe@example.com>",
committer=b"Jane <jane@example.com>")
self.assertTrue(isinstance(sha, bytes))
self.assertEqual(len(sha), 40)
class RevListTests(PorcelainTestCase):
def test_simple(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
outstream = BytesIO()
porcelain.rev_list(
self.repo.path, [c3.id], outstream=outstream)
self.assertEqual(
c3.id + b"\n" +
c2.id + b"\n" +
c1.id + b"\n",
outstream.getvalue())
class TagCreateTests(PorcelainTestCase):
def test_annotated(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
porcelain.tag_create(self.repo.path, b"tryme", b'foo <foo@bar.com>',
b'bar', annotated=True)
tags = self.repo.refs.as_dict(b"refs/tags")
self.assertEqual(list(tags.keys()), [b"tryme"])
tag = self.repo[b'refs/tags/tryme']
self.assertTrue(isinstance(tag, Tag))
self.assertEqual(b"foo <foo@bar.com>", tag.tagger)
self.assertEqual(b"bar", tag.message)
self.assertLess(time.time() - tag.tag_time, 5)
def test_unannotated(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
porcelain.tag_create(self.repo.path, b"tryme", annotated=False)
tags = self.repo.refs.as_dict(b"refs/tags")
self.assertEqual(list(tags.keys()), [b"tryme"])
self.repo[b'refs/tags/tryme']
self.assertEqual(list(tags.values()), [self.repo.head()])
def test_unannotated_unicode(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
porcelain.tag_create(self.repo.path, "tryme", annotated=False)
tags = self.repo.refs.as_dict(b"refs/tags")
self.assertEqual(list(tags.keys()), [b"tryme"])
self.repo[b'refs/tags/tryme']
self.assertEqual(list(tags.values()), [self.repo.head()])
class TagListTests(PorcelainTestCase):
def test_empty(self):
tags = porcelain.tag_list(self.repo.path)
self.assertEqual([], tags)
def test_simple(self):
self.repo.refs[b"refs/tags/foo"] = b"aa" * 20
self.repo.refs[b"refs/tags/bar/bla"] = b"bb" * 20
tags = porcelain.tag_list(self.repo.path)
self.assertEqual([b"bar/bla", b"foo"], tags)
class TagDeleteTests(PorcelainTestCase):
def test_simple(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo[b"HEAD"] = c1.id
porcelain.tag_create(self.repo, b'foo')
self.assertTrue(b"foo" in porcelain.tag_list(self.repo))
porcelain.tag_delete(self.repo, b'foo')
self.assertFalse(b"foo" in porcelain.tag_list(self.repo))
class ResetTests(PorcelainTestCase):
def test_hard_head(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
porcelain.add(self.repo.path, paths=[fullpath])
porcelain.commit(self.repo.path, message=b"Some message",
committer=b"Jane <jane@example.com>",
author=b"John <john@example.com>")
with open(os.path.join(self.repo.path, 'foo'), 'wb') as f:
f.write(b"OOH")
porcelain.reset(self.repo, "hard", b"HEAD")
index = self.repo.open_index()
changes = list(tree_changes(self.repo,
index.commit(self.repo.object_store),
self.repo[b'HEAD'].tree))
self.assertEqual([], changes)
def test_hard_commit(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
porcelain.add(self.repo.path, paths=[fullpath])
sha = porcelain.commit(self.repo.path, message=b"Some message",
committer=b"Jane <jane@example.com>",
author=b"John <john@example.com>")
with open(fullpath, 'wb') as f:
f.write(b"BAZ")
porcelain.add(self.repo.path, paths=[fullpath])
porcelain.commit(self.repo.path, message=b"Some other message",
committer=b"Jane <jane@example.com>",
author=b"John <john@example.com>")
porcelain.reset(self.repo, "hard", sha)
index = self.repo.open_index()
changes = list(tree_changes(self.repo,
index.commit(self.repo.object_store),
self.repo[sha].tree))
self.assertEqual([], changes)
class PushTests(PorcelainTestCase):
def test_simple(self):
"""
Basic test of porcelain push where self.repo is the remote. First
clone the remote, commit a file to the clone, then push the changes
back to the remote.
"""
outstream = BytesIO()
errstream = BytesIO()
porcelain.commit(repo=self.repo.path, message=b'init',
author=b'author <email>',
committer=b'committer <email>')
# Setup target repo cloned from temp test repo
clone_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, clone_path)
target_repo = porcelain.clone(self.repo.path, target=clone_path,
errstream=errstream)
try:
self.assertEqual(target_repo[b'HEAD'], self.repo[b'HEAD'])
finally:
target_repo.close()
# create a second file to be pushed back to origin
handle, fullpath = tempfile.mkstemp(dir=clone_path)
os.close(handle)
porcelain.add(repo=clone_path, paths=[fullpath])
porcelain.commit(repo=clone_path, message=b'push',
author=b'author <email>',
committer=b'committer <email>')
# Setup a non-checked out branch in the remote
refs_path = b"refs/heads/foo"
new_id = self.repo[b'HEAD'].id
self.assertNotEqual(new_id, ZERO_SHA)
self.repo.refs[refs_path] = new_id
# Push to the remote
porcelain.push(clone_path, self.repo.path, b"HEAD:" + refs_path,
outstream=outstream, errstream=errstream)
        # Check that the target and source refs match after the push
with Repo(clone_path) as r_clone:
self.assertEqual({
b'HEAD': new_id,
b'refs/heads/foo': r_clone[b'HEAD'].id,
b'refs/heads/master': new_id,
}, self.repo.get_refs())
self.assertEqual(r_clone[b'HEAD'].id, self.repo[refs_path].id)
# Get the change in the target repo corresponding to the add
# this will be in the foo branch.
change = list(tree_changes(self.repo, self.repo[b'HEAD'].tree,
self.repo[b'refs/heads/foo'].tree))[0]
self.assertEqual(os.path.basename(fullpath),
change.new.path.decode('ascii'))
def test_delete(self):
"""Basic test of porcelain push, removing a branch.
"""
outstream = BytesIO()
errstream = BytesIO()
porcelain.commit(repo=self.repo.path, message=b'init',
author=b'author <email>',
committer=b'committer <email>')
# Setup target repo cloned from temp test repo
clone_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, clone_path)
target_repo = porcelain.clone(self.repo.path, target=clone_path,
errstream=errstream)
target_repo.close()
# Setup a non-checked out branch in the remote
refs_path = b"refs/heads/foo"
new_id = self.repo[b'HEAD'].id
self.assertNotEqual(new_id, ZERO_SHA)
self.repo.refs[refs_path] = new_id
# Push to the remote
porcelain.push(clone_path, self.repo.path, b":" + refs_path,
outstream=outstream, errstream=errstream)
self.assertEqual({
b'HEAD': new_id,
b'refs/heads/master': new_id,
}, self.repo.get_refs())
class PullTests(PorcelainTestCase):
def setUp(self):
super(PullTests, self).setUp()
# create a file for initial commit
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test',
author=b'test <email>',
committer=b'test <email>')
# Setup target repo
self.target_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.target_path)
target_repo = porcelain.clone(self.repo.path, target=self.target_path,
errstream=BytesIO())
target_repo.close()
# create a second file to be pushed
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test2',
author=b'test2 <email>',
committer=b'test2 <email>')
self.assertTrue(b'refs/heads/master' in self.repo.refs)
self.assertTrue(b'refs/heads/master' in target_repo.refs)
def test_simple(self):
outstream = BytesIO()
errstream = BytesIO()
# Pull changes into the cloned repo
porcelain.pull(self.target_path, self.repo.path, b'refs/heads/master',
outstream=outstream, errstream=errstream)
# Check the target repo for pushed changes
with Repo(self.target_path) as r:
self.assertEqual(r[b'HEAD'].id, self.repo[b'HEAD'].id)
def test_no_refspec(self):
outstream = BytesIO()
errstream = BytesIO()
# Pull changes into the cloned repo
porcelain.pull(self.target_path, self.repo.path, outstream=outstream,
errstream=errstream)
# Check the target repo for pushed changes
with Repo(self.target_path) as r:
self.assertEqual(r[b'HEAD'].id, self.repo[b'HEAD'].id)
class StatusTests(PorcelainTestCase):
def test_empty(self):
results = porcelain.status(self.repo)
self.assertEqual(
{'add': [], 'delete': [], 'modify': []},
results.staged)
self.assertEqual([], results.unstaged)
def test_status_base(self):
"""Integration test for `status` functionality."""
# Commit a dummy file then modify it
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write('origstuff')
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
# modify access and modify time of path
os.utime(fullpath, (0, 0))
with open(fullpath, 'wb') as f:
f.write(b'stuff')
# Make a dummy file and stage it
filename_add = 'bar'
fullpath = os.path.join(self.repo.path, filename_add)
with open(fullpath, 'w') as f:
f.write('stuff')
porcelain.add(repo=self.repo.path, paths=fullpath)
results = porcelain.status(self.repo)
self.assertEqual(results.staged['add'][0],
filename_add.encode('ascii'))
self.assertEqual(results.unstaged, [b'foo'])
def test_status_all(self):
del_path = os.path.join(self.repo.path, 'foo')
mod_path = os.path.join(self.repo.path, 'bar')
add_path = os.path.join(self.repo.path, 'baz')
us_path = os.path.join(self.repo.path, 'blye')
ut_path = os.path.join(self.repo.path, 'blyat')
with open(del_path, 'w') as f:
f.write('origstuff')
with open(mod_path, 'w') as f:
f.write('origstuff')
with open(us_path, 'w') as f:
f.write('origstuff')
porcelain.add(repo=self.repo.path, paths=[del_path, mod_path, us_path])
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
porcelain.remove(self.repo.path, [del_path])
with open(add_path, 'w') as f:
f.write('origstuff')
with open(mod_path, 'w') as f:
f.write('more_origstuff')
with open(us_path, 'w') as f:
f.write('more_origstuff')
porcelain.add(repo=self.repo.path, paths=[add_path, mod_path])
with open(us_path, 'w') as f:
f.write('\norigstuff')
with open(ut_path, 'w') as f:
f.write('origstuff')
results = porcelain.status(self.repo.path)
self.assertDictEqual(
{'add': [b'baz'], 'delete': [b'foo'], 'modify': [b'bar']},
results.staged)
self.assertListEqual(results.unstaged, [b'blye'])
self.assertListEqual(results.untracked, ['blyat'])
def test_status_crlf_mismatch(self):
# First make a commit as if the file has been added on a Linux system
# or with core.autocrlf=True
file_path = os.path.join(self.repo.path, 'crlf')
with open(file_path, 'wb') as f:
f.write(b'line1\nline2')
porcelain.add(repo=self.repo.path, paths=[file_path])
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
# Then update the file as if it was created by CGit on a Windows
# system with core.autocrlf=true
with open(file_path, 'wb') as f:
f.write(b'line1\r\nline2')
results = porcelain.status(self.repo)
self.assertDictEqual(
{'add': [], 'delete': [], 'modify': []},
results.staged)
self.assertListEqual(results.unstaged, [b'crlf'])
self.assertListEqual(results.untracked, [])
def test_status_crlf_convert(self):
# First make a commit as if the file has been added on a Linux system
# or with core.autocrlf=True
file_path = os.path.join(self.repo.path, 'crlf')
with open(file_path, 'wb') as f:
f.write(b'line1\nline2')
porcelain.add(repo=self.repo.path, paths=[file_path])
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
# Then update the file as if it was created by CGit on a Windows
# system with core.autocrlf=true
with open(file_path, 'wb') as f:
f.write(b'line1\r\nline2')
# TODO: It should be set automatically by looking at the configuration
c = self.repo.get_config()
c.set("core", "autocrlf", True)
c.write_to_path()
results = porcelain.status(self.repo)
self.assertDictEqual(
{'add': [], 'delete': [], 'modify': []},
results.staged)
self.assertListEqual(results.unstaged, [])
self.assertListEqual(results.untracked, [])
def test_get_tree_changes_add(self):
"""Unit test for get_tree_changes add."""
# Make a dummy file, stage
filename = 'bar'
fullpath = os.path.join(self.repo.path, filename)
with open(fullpath, 'w') as f:
f.write('stuff')
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
filename = 'foo'
fullpath = os.path.join(self.repo.path, filename)
with open(fullpath, 'w') as f:
f.write('stuff')
porcelain.add(repo=self.repo.path, paths=fullpath)
changes = porcelain.get_tree_changes(self.repo.path)
self.assertEqual(changes['add'][0], filename.encode('ascii'))
self.assertEqual(len(changes['add']), 1)
self.assertEqual(len(changes['modify']), 0)
self.assertEqual(len(changes['delete']), 0)
def test_get_tree_changes_modify(self):
"""Unit test for get_tree_changes modify."""
# Make a dummy file, stage, commit, modify
filename = 'foo'
fullpath = os.path.join(self.repo.path, filename)
with open(fullpath, 'w') as f:
f.write('stuff')
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
with open(fullpath, 'w') as f:
f.write('otherstuff')
porcelain.add(repo=self.repo.path, paths=fullpath)
changes = porcelain.get_tree_changes(self.repo.path)
self.assertEqual(changes['modify'][0], filename.encode('ascii'))
self.assertEqual(len(changes['add']), 0)
self.assertEqual(len(changes['modify']), 1)
self.assertEqual(len(changes['delete']), 0)
def test_get_tree_changes_delete(self):
"""Unit test for get_tree_changes delete."""
# Make a dummy file, stage, commit, remove
filename = 'foo'
fullpath = os.path.join(self.repo.path, filename)
with open(fullpath, 'w') as f:
f.write('stuff')
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
cwd = os.getcwd()
try:
os.chdir(self.repo.path)
porcelain.remove(repo=self.repo.path, paths=[filename])
finally:
os.chdir(cwd)
changes = porcelain.get_tree_changes(self.repo.path)
self.assertEqual(changes['delete'][0], filename.encode('ascii'))
self.assertEqual(len(changes['add']), 0)
self.assertEqual(len(changes['modify']), 0)
self.assertEqual(len(changes['delete']), 1)
def test_get_untracked_paths(self):
with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
f.write('ignored\n')
with open(os.path.join(self.repo.path, 'ignored'), 'w') as f:
f.write('blah\n')
with open(os.path.join(self.repo.path, 'notignored'), 'w') as f:
f.write('blah\n')
self.assertEqual(
set(['ignored', 'notignored', '.gitignore']),
set(porcelain.get_untracked_paths(self.repo.path, self.repo.path,
self.repo.open_index())))
self.assertEqual(set(['.gitignore', 'notignored']),
set(porcelain.status(self.repo).untracked))
self.assertEqual(set(['.gitignore', 'notignored', 'ignored']),
set(porcelain.status(self.repo, ignored=True)
.untracked))
def test_get_untracked_paths_nested(self):
with open(os.path.join(self.repo.path, 'notignored'), 'w') as f:
f.write('blah\n')
subrepo = Repo.init(os.path.join(self.repo.path, 'nested'), mkdir=True)
with open(os.path.join(subrepo.path, 'another'), 'w') as f:
f.write('foo\n')
self.assertEqual(
set(['notignored']),
set(porcelain.get_untracked_paths(self.repo.path, self.repo.path,
self.repo.open_index())))
self.assertEqual(
set(['another']),
set(porcelain.get_untracked_paths(subrepo.path, subrepo.path,
subrepo.open_index())))
# TODO(jelmer): Add test for dulwich.porcelain.daemon
class UploadPackTests(PorcelainTestCase):
"""Tests for upload_pack."""
def test_upload_pack(self):
outf = BytesIO()
exitcode = porcelain.upload_pack(
self.repo.path, BytesIO(b"0000"), outf)
outlines = outf.getvalue().splitlines()
self.assertEqual([b"0000"], outlines)
self.assertEqual(0, exitcode)
class ReceivePackTests(PorcelainTestCase):
"""Tests for receive_pack."""
def test_receive_pack(self):
filename = 'foo'
fullpath = os.path.join(self.repo.path, filename)
with open(fullpath, 'w') as f:
f.write('stuff')
porcelain.add(repo=self.repo.path, paths=fullpath)
self.repo.do_commit(message=b'test status',
author=b'author <email>',
committer=b'committer <email>',
author_timestamp=1402354300,
commit_timestamp=1402354300, author_timezone=0,
commit_timezone=0)
outf = BytesIO()
exitcode = porcelain.receive_pack(
self.repo.path, BytesIO(b"0000"), outf)
outlines = outf.getvalue().splitlines()
self.assertEqual([
b'0091319b56ce3aee2d489f759736a79cc552c9bb86d9 HEAD\x00 report-status ' # noqa: E501
b'delete-refs quiet ofs-delta side-band-64k '
b'no-done symref=HEAD:refs/heads/master',
b'003f319b56ce3aee2d489f759736a79cc552c9bb86d9 refs/heads/master',
b'0000'], outlines)
self.assertEqual(0, exitcode)
class BranchListTests(PorcelainTestCase):
def test_standard(self):
self.assertEqual(set([]), set(porcelain.branch_list(self.repo)))
def test_new_branch(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo[b"HEAD"] = c1.id
porcelain.branch_create(self.repo, b"foo")
self.assertEqual(
set([b"master", b"foo"]),
set(porcelain.branch_list(self.repo)))
class BranchCreateTests(PorcelainTestCase):
def test_branch_exists(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo[b"HEAD"] = c1.id
porcelain.branch_create(self.repo, b"foo")
self.assertRaises(KeyError, porcelain.branch_create, self.repo, b"foo")
porcelain.branch_create(self.repo, b"foo", force=True)
def test_new_branch(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo[b"HEAD"] = c1.id
porcelain.branch_create(self.repo, b"foo")
self.assertEqual(
set([b"master", b"foo"]),
set(porcelain.branch_list(self.repo)))
class BranchDeleteTests(PorcelainTestCase):
def test_simple(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo[b"HEAD"] = c1.id
porcelain.branch_create(self.repo, b'foo')
self.assertTrue(b"foo" in porcelain.branch_list(self.repo))
porcelain.branch_delete(self.repo, b'foo')
self.assertFalse(b"foo" in porcelain.branch_list(self.repo))
def test_simple_unicode(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo[b"HEAD"] = c1.id
porcelain.branch_create(self.repo, 'foo')
self.assertTrue(b"foo" in porcelain.branch_list(self.repo))
porcelain.branch_delete(self.repo, 'foo')
self.assertFalse(b"foo" in porcelain.branch_list(self.repo))
class FetchTests(PorcelainTestCase):
def test_simple(self):
outstream = BytesIO()
errstream = BytesIO()
# create a file for initial commit
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test',
author=b'test <email>',
committer=b'test <email>')
# Setup target repo
target_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, target_path)
target_repo = porcelain.clone(self.repo.path, target=target_path,
errstream=errstream)
# create a second file to be pushed
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test2',
author=b'test2 <email>',
committer=b'test2 <email>')
self.assertFalse(self.repo[b'HEAD'].id in target_repo)
target_repo.close()
# Fetch changes into the cloned repo
porcelain.fetch(target_path, self.repo.path,
outstream=outstream, errstream=errstream)
# Assert that fetch updated the local image of the remote
self.assert_correct_remote_refs(
target_repo.get_refs(), self.repo.get_refs())
# Check the target repo for pushed changes
with Repo(target_path) as r:
self.assertTrue(self.repo[b'HEAD'].id in r)
def test_with_remote_name(self):
remote_name = b'origin'
outstream = BytesIO()
errstream = BytesIO()
# create a file for initial commit
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test',
author=b'test <email>',
committer=b'test <email>')
# Setup target repo
target_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, target_path)
target_repo = porcelain.clone(self.repo.path, target=target_path,
errstream=errstream)
# Capture current refs
target_refs = target_repo.get_refs()
# create a second file to be pushed
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test2',
author=b'test2 <email>',
committer=b'test2 <email>')
self.assertFalse(self.repo[b'HEAD'].id in target_repo)
target_repo.close()
# Fetch changes into the cloned repo
porcelain.fetch(target_path, self.repo.path, remote_name=remote_name,
outstream=outstream, errstream=errstream)
# Assert that fetch updated the local image of the remote
self.assert_correct_remote_refs(
target_repo.get_refs(), self.repo.get_refs())
# Check the target repo for pushed changes, as well as updates
# for the refs
with Repo(target_path) as r:
self.assertTrue(self.repo[b'HEAD'].id in r)
self.assertNotEqual(self.repo.get_refs(), target_refs)
def assert_correct_remote_refs(
self, local_refs, remote_refs, remote_name=b'origin'):
"""Assert that known remote refs corresponds to actual remote refs."""
local_ref_prefix = b'refs/heads'
remote_ref_prefix = b'refs/remotes/' + remote_name
locally_known_remote_refs = {
k[len(remote_ref_prefix) + 1:]: v for k, v in local_refs.items()
if k.startswith(remote_ref_prefix)}
normalized_remote_refs = {
k[len(local_ref_prefix) + 1:]: v for k, v in remote_refs.items()
if k.startswith(local_ref_prefix)}
self.assertEqual(locally_known_remote_refs, normalized_remote_refs)
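# Note on assert_correct_remote_refs: after fetching from a remote named
# 'origin', the remote's branch heads are tracked locally under
# refs/remotes/origin/<branch>. Stripping refs/remotes/<name>/ on the local
# side and refs/heads/ on the remote side yields two {branch: sha} mappings
# that must match if the fetch updated the local picture of the remote.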
class RepackTests(PorcelainTestCase):
def test_empty(self):
porcelain.repack(self.repo)
def test_simple(self):
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.repack(self.repo)
class LsTreeTests(PorcelainTestCase):
def test_empty(self):
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
f = StringIO()
porcelain.ls_tree(self.repo, b"HEAD", outstream=f)
self.assertEqual(f.getvalue(), "")
def test_simple(self):
# Commit a dummy file
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write('origstuff')
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
f = StringIO()
porcelain.ls_tree(self.repo, b"HEAD", outstream=f)
self.assertEqual(
f.getvalue(),
'100644 blob 8b82634d7eae019850bb883f06abf428c58bc9aa\tfoo\n')
def test_recursive(self):
# Create a directory then write a dummy file in it
dirpath = os.path.join(self.repo.path, 'adir')
filepath = os.path.join(dirpath, 'afile')
os.mkdir(dirpath)
with open(filepath, 'w') as f:
f.write('origstuff')
porcelain.add(repo=self.repo.path, paths=[filepath])
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
f = StringIO()
porcelain.ls_tree(self.repo, b"HEAD", outstream=f)
self.assertEqual(
f.getvalue(),
'40000 tree b145cc69a5e17693e24d8a7be0016ed8075de66d\tadir\n')
f = StringIO()
porcelain.ls_tree(self.repo, b"HEAD", outstream=f, recursive=True)
self.assertEqual(
f.getvalue(),
'40000 tree b145cc69a5e17693e24d8a7be0016ed8075de66d\tadir\n'
'100644 blob 8b82634d7eae019850bb883f06abf428c58bc9aa\tadir'
'/afile\n')
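# The assertions above rely on ls_tree writing one entry per line in the form
# "<mode> <kind> <sha>\t<name>", mirroring `git ls-tree`; with recursive=True
# the entries of subtrees are expanded and listed as well.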
class LsRemoteTests(PorcelainTestCase):
def test_empty(self):
self.assertEqual({}, porcelain.ls_remote(self.repo.path))
def test_some(self):
cid = porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
self.assertEqual({
b'refs/heads/master': cid,
b'HEAD': cid},
porcelain.ls_remote(self.repo.path))
class LsFilesTests(PorcelainTestCase):
def test_empty(self):
self.assertEqual([], list(porcelain.ls_files(self.repo)))
def test_simple(self):
# Stage a dummy file
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write('origstuff')
porcelain.add(repo=self.repo.path, paths=[fullpath])
self.assertEqual([b'foo'], list(porcelain.ls_files(self.repo)))
class RemoteAddTests(PorcelainTestCase):
def test_new(self):
porcelain.remote_add(
self.repo, 'jelmer', 'git://jelmer.uk/code/dulwich')
c = self.repo.get_config()
self.assertEqual(
c.get((b'remote', b'jelmer'), b'url'),
b'git://jelmer.uk/code/dulwich')
def test_exists(self):
porcelain.remote_add(
self.repo, 'jelmer', 'git://jelmer.uk/code/dulwich')
self.assertRaises(porcelain.RemoteExists, porcelain.remote_add,
self.repo, 'jelmer', 'git://jelmer.uk/code/dulwich')
class CheckIgnoreTests(PorcelainTestCase):
def test_check_ignored(self):
with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
f.write('foo')
foo_path = os.path.join(self.repo.path, 'foo')
with open(foo_path, 'w') as f:
f.write('BAR')
bar_path = os.path.join(self.repo.path, 'bar')
with open(bar_path, 'w') as f:
f.write('BAR')
self.assertEqual(
['foo'],
list(porcelain.check_ignore(self.repo, [foo_path])))
self.assertEqual(
[], list(porcelain.check_ignore(self.repo, [bar_path])))
def test_check_added_abs(self):
path = os.path.join(self.repo.path, 'foo')
with open(path, 'w') as f:
f.write('BAR')
self.repo.stage(['foo'])
with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
f.write('foo\n')
self.assertEqual(
[], list(porcelain.check_ignore(self.repo, [path])))
self.assertEqual(
['foo'],
list(porcelain.check_ignore(self.repo, [path], no_index=True)))
def test_check_added_rel(self):
with open(os.path.join(self.repo.path, 'foo'), 'w') as f:
f.write('BAR')
self.repo.stage(['foo'])
with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
f.write('foo\n')
cwd = os.getcwd()
os.mkdir(os.path.join(self.repo.path, 'bar'))
os.chdir(os.path.join(self.repo.path, 'bar'))
try:
self.assertEqual(
list(porcelain.check_ignore(self.repo, ['../foo'])), [])
self.assertEqual(['../foo'], list(
porcelain.check_ignore(self.repo, ['../foo'], no_index=True)))
finally:
os.chdir(cwd)
class UpdateHeadTests(PorcelainTestCase):
def test_set_to_branch(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo.refs[b"refs/heads/blah"] = c1.id
porcelain.update_head(self.repo, "blah")
self.assertEqual(c1.id, self.repo.head())
self.assertEqual(b'ref: refs/heads/blah',
self.repo.refs.read_ref(b'HEAD'))
def test_set_to_branch_detached(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo.refs[b"refs/heads/blah"] = c1.id
porcelain.update_head(self.repo, "blah", detached=True)
self.assertEqual(c1.id, self.repo.head())
self.assertEqual(c1.id, self.repo.refs.read_ref(b'HEAD'))
def test_set_to_commit_detached(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo.refs[b"refs/heads/blah"] = c1.id
porcelain.update_head(self.repo, c1.id, detached=True)
self.assertEqual(c1.id, self.repo.head())
self.assertEqual(c1.id, self.repo.refs.read_ref(b'HEAD'))
def test_set_new_branch(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo.refs[b"refs/heads/blah"] = c1.id
porcelain.update_head(self.repo, "blah", new_branch="bar")
self.assertEqual(c1.id, self.repo.head())
self.assertEqual(b'ref: refs/heads/bar',
self.repo.refs.read_ref(b'HEAD'))
class MailmapTests(PorcelainTestCase):
def test_no_mailmap(self):
self.assertEqual(
b'Jelmer Vernooij <jelmer@samba.org>',
porcelain.check_mailmap(
self.repo, b'Jelmer Vernooij <jelmer@samba.org>'))
def test_mailmap_lookup(self):
with open(os.path.join(self.repo.path, '.mailmap'), 'wb') as f:
f.write(b"""\
Jelmer Vernooij <jelmer@debian.org>
""")
self.assertEqual(
b'Jelmer Vernooij <jelmer@debian.org>',
porcelain.check_mailmap(
self.repo, b'Jelmer Vernooij <jelmer@samba.org>'))
class FsckTests(PorcelainTestCase):
def test_none(self):
self.assertEqual(
[],
list(porcelain.fsck(self.repo)))
def test_git_dir(self):
obj = Tree()
a = Blob()
a.data = b"foo"
obj.add(b".git", 0o100644, a.id)
self.repo.object_store.add_objects(
[(a, None), (obj, None)])
self.assertEqual(
[(obj.id, 'invalid name .git')],
[(sha, str(e)) for (sha, e) in porcelain.fsck(self.repo)])
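# As exercised above, porcelain.fsck yields (sha, error) pairs for objects
# that fail validation, e.g. a tree entry using the reserved name ".git".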
class DescribeTests(PorcelainTestCase):
def test_no_commits(self):
self.assertRaises(KeyError, porcelain.describe, self.repo.path)
def test_single_commit(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
porcelain.add(repo=self.repo.path, paths=[fullpath])
sha = porcelain.commit(
self.repo.path, message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>")
self.assertEqual(
'g{}'.format(sha[:7].decode('ascii')),
porcelain.describe(self.repo.path))
def test_tag(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(
self.repo.path, message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>")
porcelain.tag_create(self.repo.path, b"tryme", b'foo <foo@bar.com>',
b'bar', annotated=True)
self.assertEqual(
"tryme",
porcelain.describe(self.repo.path))
def test_tag_and_commit(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(
self.repo.path, message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>")
porcelain.tag_create(self.repo.path, b"tryme", b'foo <foo@bar.com>',
b'bar', annotated=True)
with open(fullpath, 'w') as f:
f.write("BAR2")
porcelain.add(repo=self.repo.path, paths=[fullpath])
sha = porcelain.commit(
self.repo.path, message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>")
self.assertEqual(
'tryme-1-g{}'.format(sha[:7].decode('ascii')),
porcelain.describe(self.repo.path))
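# Taken together, these cases show porcelain.describe following the
# `git describe` conventions: "g<short-sha>" when no tag exists, the tag name
# when HEAD is exactly tagged, and "<tag>-<n>-g<short-sha>" when <n> commits
# have been made since the tag.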
class HelperTests(PorcelainTestCase):
def test_path_to_tree_path_base(self):
self.assertEqual(
b'bar', porcelain.path_to_tree_path('/home/foo', '/home/foo/bar'))
self.assertEqual(b'bar', porcelain.path_to_tree_path('.', './bar'))
self.assertEqual(b'bar', porcelain.path_to_tree_path('.', 'bar'))
cwd = os.getcwd()
self.assertEqual(
b'bar', porcelain.path_to_tree_path('.', os.path.join(cwd, 'bar')))
self.assertEqual(b'bar', porcelain.path_to_tree_path(cwd, 'bar'))
def test_path_to_tree_path_syntax(self):
self.assertEqual(b'bar', porcelain.path_to_tree_path(b'.', './bar'))
self.assertEqual(b'bar', porcelain.path_to_tree_path('.', b'./bar'))
self.assertEqual(b'bar', porcelain.path_to_tree_path(b'.', b'./bar'))
def test_path_to_tree_path_error(self):
with self.assertRaises(ValueError):
porcelain.path_to_tree_path('/home/foo/', '/home/bar/baz')
def test_path_to_tree_path_rel(self):
cwd = os.getcwd()
os.mkdir(os.path.join(self.repo.path, 'foo'))
os.mkdir(os.path.join(self.repo.path, 'foo/bar'))
try:
os.chdir(os.path.join(self.repo.path, 'foo/bar'))
self.assertEqual(b'bar/baz', porcelain.path_to_tree_path(
'..', 'baz'))
self.assertEqual(b'bar/baz', porcelain.path_to_tree_path(
os.path.join(os.getcwd(), '..'),
os.path.join(os.getcwd(), 'baz')))
self.assertEqual(b'bar/baz', porcelain.path_to_tree_path(
'..', os.path.join(os.getcwd(), 'baz')))
self.assertEqual(b'bar/baz', porcelain.path_to_tree_path(
os.path.join(os.getcwd(), '..'), 'baz'))
finally:
os.chdir(cwd)
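# Summary of the behaviour exercised above: path_to_tree_path(repopath, path)
# returns the repository-relative path as bytes, accepts str or bytes for
# either argument, resolves relative inputs against the current working
# directory, and raises ValueError for paths outside the repository.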
class GetObjectBypathTests(PorcelainTestCase):
def test_simple(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(
self.repo.path, message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>")
self.assertEqual(
b"BAR",
porcelain.get_object_by_path(self.repo, 'foo').data)
def test_missing(self):
self.assertRaises(
KeyError,
porcelain.get_object_by_path, self.repo, 'foo')
class WriteTreeTests(PorcelainTestCase):
def test_simple(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
porcelain.add(repo=self.repo.path, paths=[fullpath])
self.assertEqual(
b'd2092c8a9f311f0311083bf8d177f2ca0ab5b241',
porcelain.write_tree(self.repo))
| 37.624027 | 97 | 0.58457 |
from io import BytesIO
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import errno
import os
import shutil
import tarfile
import tempfile
import time
from dulwich import porcelain
from dulwich.diff_tree import tree_changes
from dulwich.objects import (
Blob,
Tag,
Tree,
ZERO_SHA,
)
from dulwich.repo import (
NoIndexPresent,
Repo,
)
from dulwich.tests import (
TestCase,
)
from dulwich.tests.utils import (
build_commit_graph,
make_commit,
make_object,
)
def flat_walk_dir(dir_to_walk):
for dirpath, _, filenames in os.walk(dir_to_walk):
rel_dirpath = os.path.relpath(dirpath, dir_to_walk)
if not dirpath == dir_to_walk:
yield rel_dirpath
for filename in filenames:
if dirpath == dir_to_walk:
yield filename
else:
yield os.path.join(rel_dirpath, filename)
class PorcelainTestCase(TestCase):
def setUp(self):
super(PorcelainTestCase, self).setUp()
self.test_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.test_dir)
self.repo_path = os.path.join(self.test_dir, 'repo')
self.repo = Repo.init(self.repo_path, mkdir=True)
self.addCleanup(self.repo.close)
class ArchiveTests(PorcelainTestCase):
def test_simple(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"refs/heads/master"] = c3.id
out = BytesIO()
err = BytesIO()
porcelain.archive(self.repo.path, b"refs/heads/master", outstream=out,
errstream=err)
self.assertEqual(b"", err.getvalue())
tf = tarfile.TarFile(fileobj=out)
self.addCleanup(tf.close)
self.assertEqual([], tf.getnames())
class UpdateServerInfoTests(PorcelainTestCase):
def test_simple(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"refs/heads/foo"] = c3.id
porcelain.update_server_info(self.repo.path)
self.assertTrue(os.path.exists(
os.path.join(self.repo.controldir(), 'info', 'refs')))
class CommitTests(PorcelainTestCase):
def test_custom_author(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"refs/heads/foo"] = c3.id
sha = porcelain.commit(
self.repo.path, message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>")
self.assertTrue(isinstance(sha, bytes))
self.assertEqual(len(sha), 40)
def test_unicode(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"refs/heads/foo"] = c3.id
sha = porcelain.commit(
self.repo.path, message="Some message",
author="Joe <joe@example.com>",
committer="Bob <bob@example.com>")
self.assertTrue(isinstance(sha, bytes))
self.assertEqual(len(sha), 40)
class CleanTests(PorcelainTestCase):
def put_files(self, tracked, ignored, untracked, empty_dirs):
all_files = tracked | ignored | untracked
for file_path in all_files:
abs_path = os.path.join(self.repo.path, file_path)
# create the parent dir(s) as necessary
parent_dir = os.path.dirname(abs_path)
try:
os.makedirs(parent_dir)
except OSError as err:
if not err.errno == errno.EEXIST:
raise err
with open(abs_path, 'w') as f:
f.write('')
with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
f.writelines(ignored)
for dir_path in empty_dirs:
os.mkdir(os.path.join(self.repo.path, dir_path))
files_to_add = [os.path.join(self.repo.path, t) for t in tracked]
porcelain.add(repo=self.repo.path, paths=files_to_add)
porcelain.commit(repo=self.repo.path, message="init commit")
def assert_wd(self, expected_paths):
control_dir_rel = os.path.relpath(
self.repo._controldir, self.repo.path)
# normalize paths to simplify comparison across platforms
found_paths = {
os.path.normpath(p)
for p in flat_walk_dir(self.repo.path)
if not p.split(os.sep)[0] == control_dir_rel}
norm_expected_paths = {os.path.normpath(p) for p in expected_paths}
self.assertEqual(found_paths, norm_expected_paths)
def test_from_root(self):
self.put_files(
tracked={
'tracked_file',
'tracked_dir/tracked_file',
'.gitignore'},
ignored={
'ignored_file'},
untracked={
'untracked_file',
'tracked_dir/untracked_dir/untracked_file',
'untracked_dir/untracked_dir/untracked_file'},
empty_dirs={
'empty_dir'})
porcelain.clean(repo=self.repo.path, target_dir=self.repo.path)
self.assert_wd({
'tracked_file',
'tracked_dir/tracked_file',
'.gitignore',
'ignored_file',
'tracked_dir'})
def test_from_subdir(self):
self.put_files(
tracked={
'tracked_file',
'tracked_dir/tracked_file',
'.gitignore'},
ignored={
'ignored_file'},
untracked={
'untracked_file',
'tracked_dir/untracked_dir/untracked_file',
'untracked_dir/untracked_dir/untracked_file'},
empty_dirs={
'empty_dir'})
porcelain.clean(
repo=self.repo,
target_dir=os.path.join(self.repo.path, 'untracked_dir'))
self.assert_wd({
'tracked_file',
'tracked_dir/tracked_file',
'.gitignore',
'ignored_file',
'untracked_file',
'tracked_dir/untracked_dir/untracked_file',
'empty_dir',
'untracked_dir',
'tracked_dir',
'tracked_dir/untracked_dir'})
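# Behaviour exercised by the two tests above: porcelain.clean removes
# untracked files under target_dir (and directories that are left empty),
# while tracked files, ignored files and anything outside target_dir are
# kept in place.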
class CloneTests(PorcelainTestCase):
def test_simple_local(self):
f1_1 = make_object(Blob, data=b'f1')
commit_spec = [[1], [2, 1], [3, 1, 2]]
trees = {1: [(b'f1', f1_1), (b'f2', f1_1)],
2: [(b'f1', f1_1), (b'f2', f1_1)],
3: [(b'f1', f1_1), (b'f2', f1_1)], }
c1, c2, c3 = build_commit_graph(self.repo.object_store,
commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c3.id
self.repo.refs[b"refs/tags/foo"] = c3.id
target_path = tempfile.mkdtemp()
errstream = BytesIO()
self.addCleanup(shutil.rmtree, target_path)
r = porcelain.clone(self.repo.path, target_path,
checkout=False, errstream=errstream)
self.addCleanup(r.close)
self.assertEqual(r.path, target_path)
target_repo = Repo(target_path)
self.assertEqual(0, len(target_repo.open_index()))
self.assertEqual(c3.id, target_repo.refs[b'refs/tags/foo'])
self.assertTrue(b'f1' not in os.listdir(target_path))
self.assertTrue(b'f2' not in os.listdir(target_path))
c = r.get_config()
encoded_path = self.repo.path
if not isinstance(encoded_path, bytes):
encoded_path = encoded_path.encode('utf-8')
self.assertEqual(encoded_path, c.get((b'remote', b'origin'), b'url'))
self.assertEqual(
b'+refs/heads/*:refs/remotes/origin/*',
c.get((b'remote', b'origin'), b'fetch'))
def test_simple_local_with_checkout(self):
f1_1 = make_object(Blob, data=b'f1')
commit_spec = [[1], [2, 1], [3, 1, 2]]
trees = {1: [(b'f1', f1_1), (b'f2', f1_1)],
2: [(b'f1', f1_1), (b'f2', f1_1)],
3: [(b'f1', f1_1), (b'f2', f1_1)], }
c1, c2, c3 = build_commit_graph(self.repo.object_store,
commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c3.id
target_path = tempfile.mkdtemp()
errstream = BytesIO()
self.addCleanup(shutil.rmtree, target_path)
with porcelain.clone(self.repo.path, target_path,
checkout=True,
errstream=errstream) as r:
self.assertEqual(r.path, target_path)
with Repo(target_path) as r:
self.assertEqual(r.head(), c3.id)
self.assertTrue('f1' in os.listdir(target_path))
self.assertTrue('f2' in os.listdir(target_path))
def test_bare_local_with_checkout(self):
f1_1 = make_object(Blob, data=b'f1')
commit_spec = [[1], [2, 1], [3, 1, 2]]
trees = {1: [(b'f1', f1_1), (b'f2', f1_1)],
2: [(b'f1', f1_1), (b'f2', f1_1)],
3: [(b'f1', f1_1), (b'f2', f1_1)], }
c1, c2, c3 = build_commit_graph(self.repo.object_store,
commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c3.id
target_path = tempfile.mkdtemp()
errstream = BytesIO()
self.addCleanup(shutil.rmtree, target_path)
with porcelain.clone(
self.repo.path, target_path, bare=True,
errstream=errstream) as r:
self.assertEqual(r.path, target_path)
with Repo(target_path) as r:
r.head()
self.assertRaises(NoIndexPresent, r.open_index)
self.assertFalse(b'f1' in os.listdir(target_path))
self.assertFalse(b'f2' in os.listdir(target_path))
def test_no_checkout_with_bare(self):
f1_1 = make_object(Blob, data=b'f1')
commit_spec = [[1]]
trees = {1: [(b'f1', f1_1), (b'f2', f1_1)]}
(c1, ) = build_commit_graph(self.repo.object_store, commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c1.id
self.repo.refs[b"HEAD"] = c1.id
target_path = tempfile.mkdtemp()
errstream = BytesIO()
self.addCleanup(shutil.rmtree, target_path)
self.assertRaises(
ValueError, porcelain.clone, self.repo.path,
target_path, checkout=True, bare=True, errstream=errstream)
def test_no_head_no_checkout(self):
f1_1 = make_object(Blob, data=b'f1')
commit_spec = [[1]]
trees = {1: [(b'f1', f1_1), (b'f2', f1_1)]}
(c1, ) = build_commit_graph(self.repo.object_store, commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c1.id
target_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, target_path)
errstream = BytesIO()
r = porcelain.clone(
self.repo.path, target_path, checkout=True, errstream=errstream)
r.close()
def test_no_head_no_checkout_outstream_errstream_autofallback(self):
f1_1 = make_object(Blob, data=b'f1')
commit_spec = [[1]]
trees = {1: [(b'f1', f1_1), (b'f2', f1_1)]}
(c1, ) = build_commit_graph(self.repo.object_store, commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c1.id
target_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, target_path)
errstream = porcelain.NoneStream()
r = porcelain.clone(
self.repo.path, target_path, checkout=True, errstream=errstream)
r.close()
class InitTests(TestCase):
def test_non_bare(self):
repo_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, repo_dir)
porcelain.init(repo_dir)
def test_bare(self):
repo_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, repo_dir)
porcelain.init(repo_dir, bare=True)
class AddTests(PorcelainTestCase):
def test_add_default_paths(self):
# create a file for initial commit
fullpath = os.path.join(self.repo.path, 'blah')
with open(fullpath, 'w') as f:
f.write("\n")
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(repo=self.repo.path, message=b'test',
author=b'test <email>', committer=b'test <email>')
# Add a second test file and a file in a directory
with open(os.path.join(self.repo.path, 'foo'), 'w') as f:
f.write("\n")
os.mkdir(os.path.join(self.repo.path, 'adir'))
with open(os.path.join(self.repo.path, 'adir', 'afile'), 'w') as f:
f.write("\n")
cwd = os.getcwd()
try:
os.chdir(self.repo.path)
porcelain.add(self.repo.path)
finally:
os.chdir(cwd)
# Check that foo was added and nothing in .git was modified
index = self.repo.open_index()
self.assertEqual(sorted(index), [b'adir/afile', b'blah', b'foo'])
def test_add_default_paths_subdir(self):
os.mkdir(os.path.join(self.repo.path, 'foo'))
with open(os.path.join(self.repo.path, 'blah'), 'w') as f:
f.write("\n")
with open(os.path.join(self.repo.path, 'foo', 'blie'), 'w') as f:
f.write("\n")
cwd = os.getcwd()
try:
os.chdir(os.path.join(self.repo.path, 'foo'))
porcelain.add(repo=self.repo.path)
porcelain.commit(repo=self.repo.path, message=b'test',
author=b'test <email>',
committer=b'test <email>')
finally:
os.chdir(cwd)
index = self.repo.open_index()
self.assertEqual(sorted(index), [b'foo/blie'])
def test_add_file(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
porcelain.add(self.repo.path, paths=[fullpath])
self.assertIn(b"foo", self.repo.open_index())
def test_add_ignored(self):
with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
f.write("foo")
with open(os.path.join(self.repo.path, 'foo'), 'w') as f:
f.write("BAR")
with open(os.path.join(self.repo.path, 'bar'), 'w') as f:
f.write("BAR")
(added, ignored) = porcelain.add(self.repo.path, paths=[
os.path.join(self.repo.path, "foo"),
os.path.join(self.repo.path, "bar")])
self.assertIn(b"bar", self.repo.open_index())
self.assertEqual(set(['bar']), set(added))
self.assertEqual(set(['foo']), ignored)
def test_add_file_absolute_path(self):
# Absolute paths should be accepted and stored relative to the repo root
with open(os.path.join(self.repo.path, 'foo'), 'w') as f:
f.write("BAR")
porcelain.add(self.repo, paths=[os.path.join(self.repo.path, "foo")])
self.assertIn(b"foo", self.repo.open_index())
def test_add_not_in_repo(self):
with open(os.path.join(self.test_dir, 'foo'), 'w') as f:
f.write("BAR")
self.assertRaises(
ValueError,
porcelain.add, self.repo,
paths=[os.path.join(self.test_dir, "foo")])
self.assertRaises(
ValueError,
porcelain.add, self.repo,
paths=["../foo"])
self.assertEqual([], list(self.repo.open_index()))
def test_add_file_crlf_conversion(self):
# Configure the repo to convert CRLF line endings on input
c = self.repo.get_config()
c.set("core", "autocrlf", "input")
c.write_to_path()
# Add a file with CRLF line-ending
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'wb') as f:
f.write(b"line1\r\nline2")
porcelain.add(self.repo.path, paths=[fullpath])
# The line-endings should have been converted to LF
index = self.repo.open_index()
self.assertIn(b"foo", index)
entry = index[b"foo"]
blob = self.repo[entry.sha]
self.assertEqual(blob.data, b"line1\nline2")
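# Note: with core.autocrlf set to "input", line endings are normalised from
# CRLF to LF at staging time, so the blob stored in the object store (checked
# above) never contains the original "\r\n" sequence.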
class RemoveTests(PorcelainTestCase):
def test_remove_file(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
porcelain.add(self.repo.path, paths=[fullpath])
porcelain.commit(repo=self.repo, message=b'test',
author=b'test <email>',
committer=b'test <email>')
self.assertTrue(os.path.exists(os.path.join(self.repo.path, 'foo')))
cwd = os.getcwd()
try:
os.chdir(self.repo.path)
porcelain.remove(self.repo.path, paths=["foo"])
finally:
os.chdir(cwd)
self.assertFalse(os.path.exists(os.path.join(self.repo.path, 'foo')))
def test_remove_file_staged(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
cwd = os.getcwd()
try:
os.chdir(self.repo.path)
porcelain.add(self.repo.path, paths=[fullpath])
self.assertRaises(Exception, porcelain.rm, self.repo.path,
paths=["foo"])
finally:
os.chdir(cwd)
class LogTests(PorcelainTestCase):
def test_simple(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
outstream = StringIO()
porcelain.log(self.repo.path, outstream=outstream)
self.assertEqual(3, outstream.getvalue().count("-" * 50))
def test_max_entries(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
outstream = StringIO()
porcelain.log(self.repo.path, outstream=outstream, max_entries=1)
self.assertEqual(1, outstream.getvalue().count("-" * 50))
class ShowTests(PorcelainTestCase):
def test_nolist(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
outstream = StringIO()
porcelain.show(self.repo.path, objects=c3.id, outstream=outstream)
self.assertTrue(outstream.getvalue().startswith("-" * 50))
def test_simple(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
outstream = StringIO()
porcelain.show(self.repo.path, objects=[c3.id], outstream=outstream)
self.assertTrue(outstream.getvalue().startswith("-" * 50))
def test_blob(self):
b = Blob.from_string(b"The Foo\n")
self.repo.object_store.add_object(b)
outstream = StringIO()
porcelain.show(self.repo.path, objects=[b.id], outstream=outstream)
self.assertEqual(outstream.getvalue(), "The Foo\n")
def test_commit_no_parent(self):
a = Blob.from_string(b"The Foo\n")
ta = Tree()
ta.add(b"somename", 0o100644, a.id)
ca = make_commit(tree=ta.id)
self.repo.object_store.add_objects([(a, None), (ta, None), (ca, None)])
outstream = StringIO()
porcelain.show(self.repo.path, objects=[ca.id], outstream=outstream)
self.assertMultiLineEqual(outstream.getvalue(), """\
--------------------------------------------------
commit: 344da06c1bb85901270b3e8875c988a027ec087d
Author: Test Author <test@nodomain.com>
Committer: Test Committer <test@nodomain.com>
Date: Fri Jan 01 2010 00:00:00 +0000
Test message.
diff --git a/somename b/somename
new file mode 100644
index 0000000..ea5c7bf
--- /dev/null
+++ b/somename
@@ -0,0 +1 @@
+The Foo
""")
def test_tag(self):
a = Blob.from_string(b"The Foo\n")
ta = Tree()
ta.add(b"somename", 0o100644, a.id)
ca = make_commit(tree=ta.id)
self.repo.object_store.add_objects([(a, None), (ta, None), (ca, None)])
porcelain.tag_create(
self.repo.path, b"tryme", b'foo <foo@bar.com>', b'bar',
annotated=True, objectish=ca.id, tag_time=1552854211,
tag_timezone=0)
outstream = StringIO()
porcelain.show(self.repo, objects=[b'refs/tags/tryme'],
outstream=outstream)
self.maxDiff = None
self.assertMultiLineEqual(outstream.getvalue(), """\
Tagger: foo <foo@bar.com>
Date: Sun Mar 17 2019 20:23:31 +0000
bar
--------------------------------------------------
commit: 344da06c1bb85901270b3e8875c988a027ec087d
Author: Test Author <test@nodomain.com>
Committer: Test Committer <test@nodomain.com>
Date: Fri Jan 01 2010 00:00:00 +0000
Test message.
diff --git a/somename b/somename
new file mode 100644
index 0000000..ea5c7bf
--- /dev/null
+++ b/somename
@@ -0,0 +1 @@
+The Foo
""")
def test_commit_with_change(self):
a = Blob.from_string(b"The Foo\n")
ta = Tree()
ta.add(b"somename", 0o100644, a.id)
ca = make_commit(tree=ta.id)
b = Blob.from_string(b"The Bar\n")
tb = Tree()
tb.add(b"somename", 0o100644, b.id)
cb = make_commit(tree=tb.id, parents=[ca.id])
self.repo.object_store.add_objects(
[(a, None), (b, None), (ta, None), (tb, None),
(ca, None), (cb, None)])
outstream = StringIO()
porcelain.show(self.repo.path, objects=[cb.id], outstream=outstream)
self.assertMultiLineEqual(outstream.getvalue(), """\
--------------------------------------------------
commit: 2c6b6c9cb72c130956657e1fdae58e5b103744fa
Author: Test Author <test@nodomain.com>
Committer: Test Committer <test@nodomain.com>
Date: Fri Jan 01 2010 00:00:00 +0000
Test message.
diff --git a/somename b/somename
index ea5c7bf..fd38bcb 100644
--- a/somename
+++ b/somename
@@ -1 +1 @@
-The Foo
+The Bar
""")
class SymbolicRefTests(PorcelainTestCase):
def test_set_wrong_symbolic_ref(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
self.assertRaises(ValueError, porcelain.symbolic_ref, self.repo.path,
b'foobar')
def test_set_force_wrong_symbolic_ref(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
porcelain.symbolic_ref(self.repo.path, b'force_foobar', force=True)
# test if we actually changed the file
with self.repo.get_named_file('HEAD') as f:
new_ref = f.read()
self.assertEqual(new_ref, b'ref: refs/heads/force_foobar\n')
def test_set_symbolic_ref(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
porcelain.symbolic_ref(self.repo.path, b'master')
def test_set_symbolic_ref_other_than_master(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]],
attrs=dict(refs='develop'))
self.repo.refs[b"HEAD"] = c3.id
self.repo.refs[b"refs/heads/develop"] = c3.id
porcelain.symbolic_ref(self.repo.path, b'develop')
# test if we actually changed the file
with self.repo.get_named_file('HEAD') as f:
new_ref = f.read()
self.assertEqual(new_ref, b'ref: refs/heads/develop\n')
class DiffTreeTests(PorcelainTestCase):
def test_empty(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
outstream = BytesIO()
porcelain.diff_tree(self.repo.path, c2.tree, c3.tree,
outstream=outstream)
self.assertEqual(outstream.getvalue(), b"")
class CommitTreeTests(PorcelainTestCase):
def test_simple(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
b = Blob()
b.data = b"foo the bar"
t = Tree()
t.add(b"somename", 0o100644, b.id)
self.repo.object_store.add_object(t)
self.repo.object_store.add_object(b)
sha = porcelain.commit_tree(
self.repo.path, t.id, message=b"Withcommit.",
author=b"Joe <joe@example.com>",
committer=b"Jane <jane@example.com>")
self.assertTrue(isinstance(sha, bytes))
self.assertEqual(len(sha), 40)
class RevListTests(PorcelainTestCase):
def test_simple(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
outstream = BytesIO()
porcelain.rev_list(
self.repo.path, [c3.id], outstream=outstream)
self.assertEqual(
c3.id + b"\n" +
c2.id + b"\n" +
c1.id + b"\n",
outstream.getvalue())
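# rev_list writes the reachable commit ids one per line, newest first, which
# is why the expected output above is c3, then c2, then c1.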
class TagCreateTests(PorcelainTestCase):
def test_annotated(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
porcelain.tag_create(self.repo.path, b"tryme", b'foo <foo@bar.com>',
b'bar', annotated=True)
tags = self.repo.refs.as_dict(b"refs/tags")
self.assertEqual(list(tags.keys()), [b"tryme"])
tag = self.repo[b'refs/tags/tryme']
self.assertTrue(isinstance(tag, Tag))
self.assertEqual(b"foo <foo@bar.com>", tag.tagger)
self.assertEqual(b"bar", tag.message)
self.assertLess(time.time() - tag.tag_time, 5)
def test_unannotated(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
porcelain.tag_create(self.repo.path, b"tryme", annotated=False)
tags = self.repo.refs.as_dict(b"refs/tags")
self.assertEqual(list(tags.keys()), [b"tryme"])
self.repo[b'refs/tags/tryme']
self.assertEqual(list(tags.values()), [self.repo.head()])
def test_unannotated_unicode(self):
c1, c2, c3 = build_commit_graph(
self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
self.repo.refs[b"HEAD"] = c3.id
porcelain.tag_create(self.repo.path, "tryme", annotated=False)
tags = self.repo.refs.as_dict(b"refs/tags")
self.assertEqual(list(tags.keys()), [b"tryme"])
self.repo[b'refs/tags/tryme']
self.assertEqual(list(tags.values()), [self.repo.head()])
class TagListTests(PorcelainTestCase):
def test_empty(self):
tags = porcelain.tag_list(self.repo.path)
self.assertEqual([], tags)
def test_simple(self):
self.repo.refs[b"refs/tags/foo"] = b"aa" * 20
self.repo.refs[b"refs/tags/bar/bla"] = b"bb" * 20
tags = porcelain.tag_list(self.repo.path)
self.assertEqual([b"bar/bla", b"foo"], tags)
class TagDeleteTests(PorcelainTestCase):
def test_simple(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo[b"HEAD"] = c1.id
porcelain.tag_create(self.repo, b'foo')
self.assertTrue(b"foo" in porcelain.tag_list(self.repo))
porcelain.tag_delete(self.repo, b'foo')
self.assertFalse(b"foo" in porcelain.tag_list(self.repo))
class ResetTests(PorcelainTestCase):
def test_hard_head(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
porcelain.add(self.repo.path, paths=[fullpath])
porcelain.commit(self.repo.path, message=b"Some message",
committer=b"Jane <jane@example.com>",
author=b"John <john@example.com>")
with open(os.path.join(self.repo.path, 'foo'), 'wb') as f:
f.write(b"OOH")
porcelain.reset(self.repo, "hard", b"HEAD")
index = self.repo.open_index()
changes = list(tree_changes(self.repo,
index.commit(self.repo.object_store),
self.repo[b'HEAD'].tree))
self.assertEqual([], changes)
def test_hard_commit(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
porcelain.add(self.repo.path, paths=[fullpath])
sha = porcelain.commit(self.repo.path, message=b"Some message",
committer=b"Jane <jane@example.com>",
author=b"John <john@example.com>")
with open(fullpath, 'wb') as f:
f.write(b"BAZ")
porcelain.add(self.repo.path, paths=[fullpath])
porcelain.commit(self.repo.path, message=b"Some other message",
committer=b"Jane <jane@example.com>",
author=b"John <john@example.com>")
porcelain.reset(self.repo, "hard", sha)
index = self.repo.open_index()
changes = list(tree_changes(self.repo,
index.commit(self.repo.object_store),
self.repo[sha].tree))
self.assertEqual([], changes)
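# In both cases reset(repo, "hard", committish) makes the index match the
# tree of the target committish; the tests verify this by committing the
# index to a tree and asserting that tree_changes against the target tree
# reports no differences.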
class PushTests(PorcelainTestCase):
def test_simple(self):
outstream = BytesIO()
errstream = BytesIO()
porcelain.commit(repo=self.repo.path, message=b'init',
author=b'author <email>',
committer=b'committer <email>')
# Setup target repo cloned from temp test repo
clone_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, clone_path)
target_repo = porcelain.clone(self.repo.path, target=clone_path,
errstream=errstream)
try:
self.assertEqual(target_repo[b'HEAD'], self.repo[b'HEAD'])
finally:
target_repo.close()
# create a second file to be pushed back to origin
handle, fullpath = tempfile.mkstemp(dir=clone_path)
os.close(handle)
porcelain.add(repo=clone_path, paths=[fullpath])
porcelain.commit(repo=clone_path, message=b'push',
author=b'author <email>',
committer=b'committer <email>')
# Setup a non-checked out branch in the remote
refs_path = b"refs/heads/foo"
new_id = self.repo[b'HEAD'].id
self.assertNotEqual(new_id, ZERO_SHA)
self.repo.refs[refs_path] = new_id
# Push to the remote
porcelain.push(clone_path, self.repo.path, b"HEAD:" + refs_path,
outstream=outstream, errstream=errstream)
# Check that the target and source repos are in sync after the push
with Repo(clone_path) as r_clone:
self.assertEqual({
b'HEAD': new_id,
b'refs/heads/foo': r_clone[b'HEAD'].id,
b'refs/heads/master': new_id,
}, self.repo.get_refs())
self.assertEqual(r_clone[b'HEAD'].id, self.repo[refs_path].id)
# Get the change in the target repo corresponding to the add
# this will be in the foo branch.
change = list(tree_changes(self.repo, self.repo[b'HEAD'].tree,
self.repo[b'refs/heads/foo'].tree))[0]
self.assertEqual(os.path.basename(fullpath),
change.new.path.decode('ascii'))
def test_delete(self):
outstream = BytesIO()
errstream = BytesIO()
porcelain.commit(repo=self.repo.path, message=b'init',
author=b'author <email>',
committer=b'committer <email>')
# Setup target repo cloned from temp test repo
clone_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, clone_path)
target_repo = porcelain.clone(self.repo.path, target=clone_path,
errstream=errstream)
target_repo.close()
# Setup a non-checked out branch in the remote
refs_path = b"refs/heads/foo"
new_id = self.repo[b'HEAD'].id
self.assertNotEqual(new_id, ZERO_SHA)
self.repo.refs[refs_path] = new_id
# Push to the remote
porcelain.push(clone_path, self.repo.path, b":" + refs_path,
outstream=outstream, errstream=errstream)
self.assertEqual({
b'HEAD': new_id,
b'refs/heads/master': new_id,
}, self.repo.get_refs())
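# The two tests above cover the refspec forms accepted by porcelain.push.
# A minimal sketch (the repository paths below are illustrative, not part of
# the test fixtures):
#
#   porcelain.push(clone_path, remote_path, b"HEAD:refs/heads/foo")  # update foo from HEAD
#   porcelain.push(clone_path, remote_path, b":refs/heads/foo")      # delete foo on the remote
#
# i.e. "<src>:<dst>" updates <dst> on the remote from the local <src>, and an
# empty <src> deletes the remote ref, following git's refspec convention.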
class PullTests(PorcelainTestCase):
def setUp(self):
super(PullTests, self).setUp()
# create a file for initial commit
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test',
author=b'test <email>',
committer=b'test <email>')
# Setup target repo
self.target_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.target_path)
target_repo = porcelain.clone(self.repo.path, target=self.target_path,
errstream=BytesIO())
target_repo.close()
# create a second file to be pushed
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test2',
author=b'test2 <email>',
committer=b'test2 <email>')
self.assertTrue(b'refs/heads/master' in self.repo.refs)
self.assertTrue(b'refs/heads/master' in target_repo.refs)
def test_simple(self):
outstream = BytesIO()
errstream = BytesIO()
# Pull changes into the cloned repo
porcelain.pull(self.target_path, self.repo.path, b'refs/heads/master',
outstream=outstream, errstream=errstream)
# Check the target repo for pushed changes
with Repo(self.target_path) as r:
self.assertEqual(r[b'HEAD'].id, self.repo[b'HEAD'].id)
def test_no_refspec(self):
outstream = BytesIO()
errstream = BytesIO()
# Pull changes into the cloned repo
porcelain.pull(self.target_path, self.repo.path, outstream=outstream,
errstream=errstream)
# Check the target repo for pushed changes
with Repo(self.target_path) as r:
self.assertEqual(r[b'HEAD'].id, self.repo[b'HEAD'].id)
class StatusTests(PorcelainTestCase):
def test_empty(self):
results = porcelain.status(self.repo)
self.assertEqual(
{'add': [], 'delete': [], 'modify': []},
results.staged)
self.assertEqual([], results.unstaged)
def test_status_base(self):
# Commit a dummy file then modify it
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write('origstuff')
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
# modify access and modify time of path
os.utime(fullpath, (0, 0))
with open(fullpath, 'wb') as f:
f.write(b'stuff')
# Make a dummy file and stage it
filename_add = 'bar'
fullpath = os.path.join(self.repo.path, filename_add)
with open(fullpath, 'w') as f:
f.write('stuff')
porcelain.add(repo=self.repo.path, paths=fullpath)
results = porcelain.status(self.repo)
self.assertEqual(results.staged['add'][0],
filename_add.encode('ascii'))
self.assertEqual(results.unstaged, [b'foo'])
def test_status_all(self):
del_path = os.path.join(self.repo.path, 'foo')
mod_path = os.path.join(self.repo.path, 'bar')
add_path = os.path.join(self.repo.path, 'baz')
us_path = os.path.join(self.repo.path, 'blye')
ut_path = os.path.join(self.repo.path, 'blyat')
with open(del_path, 'w') as f:
f.write('origstuff')
with open(mod_path, 'w') as f:
f.write('origstuff')
with open(us_path, 'w') as f:
f.write('origstuff')
porcelain.add(repo=self.repo.path, paths=[del_path, mod_path, us_path])
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
porcelain.remove(self.repo.path, [del_path])
with open(add_path, 'w') as f:
f.write('origstuff')
with open(mod_path, 'w') as f:
f.write('more_origstuff')
with open(us_path, 'w') as f:
f.write('more_origstuff')
porcelain.add(repo=self.repo.path, paths=[add_path, mod_path])
with open(us_path, 'w') as f:
f.write('\norigstuff')
with open(ut_path, 'w') as f:
f.write('origstuff')
results = porcelain.status(self.repo.path)
self.assertDictEqual(
{'add': [b'baz'], 'delete': [b'foo'], 'modify': [b'bar']},
results.staged)
self.assertListEqual(results.unstaged, [b'blye'])
self.assertListEqual(results.untracked, ['blyat'])
def test_status_crlf_mismatch(self):
# First make a commit as if the file has been added on a Linux system
# or with core.autocrlf=True
file_path = os.path.join(self.repo.path, 'crlf')
with open(file_path, 'wb') as f:
f.write(b'line1\nline2')
porcelain.add(repo=self.repo.path, paths=[file_path])
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
# Then update the file as if it was created by CGit on a Windows
# system with core.autocrlf=true
with open(file_path, 'wb') as f:
f.write(b'line1\r\nline2')
results = porcelain.status(self.repo)
self.assertDictEqual(
{'add': [], 'delete': [], 'modify': []},
results.staged)
self.assertListEqual(results.unstaged, [b'crlf'])
self.assertListEqual(results.untracked, [])
def test_status_crlf_convert(self):
# First make a commit as if the file has been added on a Linux system
# or with core.autocrlf=True
file_path = os.path.join(self.repo.path, 'crlf')
with open(file_path, 'wb') as f:
f.write(b'line1\nline2')
porcelain.add(repo=self.repo.path, paths=[file_path])
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
# Then update the file as if it was created by CGit on a Windows
# system with core.autocrlf=true
with open(file_path, 'wb') as f:
f.write(b'line1\r\nline2')
# TODO: It should be set automatically by looking at the configuration
c = self.repo.get_config()
c.set("core", "autocrlf", True)
c.write_to_path()
results = porcelain.status(self.repo)
self.assertDictEqual(
{'add': [], 'delete': [], 'modify': []},
results.staged)
self.assertListEqual(results.unstaged, [])
self.assertListEqual(results.untracked, [])
def test_get_tree_changes_add(self):
# Make a dummy file, stage
filename = 'bar'
fullpath = os.path.join(self.repo.path, filename)
with open(fullpath, 'w') as f:
f.write('stuff')
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
filename = 'foo'
fullpath = os.path.join(self.repo.path, filename)
with open(fullpath, 'w') as f:
f.write('stuff')
porcelain.add(repo=self.repo.path, paths=fullpath)
changes = porcelain.get_tree_changes(self.repo.path)
self.assertEqual(changes['add'][0], filename.encode('ascii'))
self.assertEqual(len(changes['add']), 1)
self.assertEqual(len(changes['modify']), 0)
self.assertEqual(len(changes['delete']), 0)
def test_get_tree_changes_modify(self):
# Make a dummy file, stage, commit, modify
filename = 'foo'
fullpath = os.path.join(self.repo.path, filename)
with open(fullpath, 'w') as f:
f.write('stuff')
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
with open(fullpath, 'w') as f:
f.write('otherstuff')
porcelain.add(repo=self.repo.path, paths=fullpath)
changes = porcelain.get_tree_changes(self.repo.path)
self.assertEqual(changes['modify'][0], filename.encode('ascii'))
self.assertEqual(len(changes['add']), 0)
self.assertEqual(len(changes['modify']), 1)
self.assertEqual(len(changes['delete']), 0)
def test_get_tree_changes_delete(self):
# Make a dummy file, stage, commit, remove
filename = 'foo'
fullpath = os.path.join(self.repo.path, filename)
with open(fullpath, 'w') as f:
f.write('stuff')
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
cwd = os.getcwd()
try:
os.chdir(self.repo.path)
porcelain.remove(repo=self.repo.path, paths=[filename])
finally:
os.chdir(cwd)
changes = porcelain.get_tree_changes(self.repo.path)
self.assertEqual(changes['delete'][0], filename.encode('ascii'))
self.assertEqual(len(changes['add']), 0)
self.assertEqual(len(changes['modify']), 0)
self.assertEqual(len(changes['delete']), 1)
def test_get_untracked_paths(self):
with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
f.write('ignored\n')
with open(os.path.join(self.repo.path, 'ignored'), 'w') as f:
f.write('blah\n')
with open(os.path.join(self.repo.path, 'notignored'), 'w') as f:
f.write('blah\n')
self.assertEqual(
set(['ignored', 'notignored', '.gitignore']),
set(porcelain.get_untracked_paths(self.repo.path, self.repo.path,
self.repo.open_index())))
self.assertEqual(set(['.gitignore', 'notignored']),
set(porcelain.status(self.repo).untracked))
self.assertEqual(set(['.gitignore', 'notignored', 'ignored']),
set(porcelain.status(self.repo, ignored=True)
.untracked))
def test_get_untracked_paths_nested(self):
with open(os.path.join(self.repo.path, 'notignored'), 'w') as f:
f.write('blah\n')
subrepo = Repo.init(os.path.join(self.repo.path, 'nested'), mkdir=True)
with open(os.path.join(subrepo.path, 'another'), 'w') as f:
f.write('foo\n')
self.assertEqual(
set(['notignored']),
set(porcelain.get_untracked_paths(self.repo.path, self.repo.path,
self.repo.open_index())))
self.assertEqual(
set(['another']),
set(porcelain.get_untracked_paths(subrepo.path, subrepo.path,
subrepo.open_index())))
# TODO(jelmer): Add test for dulwich.porcelain.daemon
class UploadPackTests(PorcelainTestCase):
def test_upload_pack(self):
outf = BytesIO()
exitcode = porcelain.upload_pack(
self.repo.path, BytesIO(b"0000"), outf)
outlines = outf.getvalue().splitlines()
self.assertEqual([b"0000"], outlines)
self.assertEqual(0, exitcode)
class ReceivePackTests(PorcelainTestCase):
def test_receive_pack(self):
filename = 'foo'
fullpath = os.path.join(self.repo.path, filename)
with open(fullpath, 'w') as f:
f.write('stuff')
porcelain.add(repo=self.repo.path, paths=fullpath)
self.repo.do_commit(message=b'test status',
author=b'author <email>',
committer=b'committer <email>',
author_timestamp=1402354300,
commit_timestamp=1402354300, author_timezone=0,
commit_timezone=0)
outf = BytesIO()
exitcode = porcelain.receive_pack(
self.repo.path, BytesIO(b"0000"), outf)
outlines = outf.getvalue().splitlines()
self.assertEqual([
b'0091319b56ce3aee2d489f759736a79cc552c9bb86d9 HEAD\x00 report-status ' # noqa: E501
b'delete-refs quiet ofs-delta side-band-64k '
b'no-done symref=HEAD:refs/heads/master',
b'003f319b56ce3aee2d489f759736a79cc552c9bb86d9 refs/heads/master',
b'0000'], outlines)
self.assertEqual(0, exitcode)
class BranchListTests(PorcelainTestCase):
def test_standard(self):
self.assertEqual(set([]), set(porcelain.branch_list(self.repo)))
def test_new_branch(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo[b"HEAD"] = c1.id
porcelain.branch_create(self.repo, b"foo")
self.assertEqual(
set([b"master", b"foo"]),
set(porcelain.branch_list(self.repo)))
class BranchCreateTests(PorcelainTestCase):
def test_branch_exists(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo[b"HEAD"] = c1.id
porcelain.branch_create(self.repo, b"foo")
self.assertRaises(KeyError, porcelain.branch_create, self.repo, b"foo")
porcelain.branch_create(self.repo, b"foo", force=True)
def test_new_branch(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo[b"HEAD"] = c1.id
porcelain.branch_create(self.repo, b"foo")
self.assertEqual(
set([b"master", b"foo"]),
set(porcelain.branch_list(self.repo)))
class BranchDeleteTests(PorcelainTestCase):
def test_simple(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo[b"HEAD"] = c1.id
porcelain.branch_create(self.repo, b'foo')
self.assertTrue(b"foo" in porcelain.branch_list(self.repo))
porcelain.branch_delete(self.repo, b'foo')
self.assertFalse(b"foo" in porcelain.branch_list(self.repo))
def test_simple_unicode(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo[b"HEAD"] = c1.id
porcelain.branch_create(self.repo, 'foo')
self.assertTrue(b"foo" in porcelain.branch_list(self.repo))
porcelain.branch_delete(self.repo, 'foo')
self.assertFalse(b"foo" in porcelain.branch_list(self.repo))
class FetchTests(PorcelainTestCase):
def test_simple(self):
outstream = BytesIO()
errstream = BytesIO()
# create a file for initial commit
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test',
author=b'test <email>',
committer=b'test <email>')
# Setup target repo
target_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, target_path)
target_repo = porcelain.clone(self.repo.path, target=target_path,
errstream=errstream)
# create a second file to be pushed
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test2',
author=b'test2 <email>',
committer=b'test2 <email>')
self.assertFalse(self.repo[b'HEAD'].id in target_repo)
target_repo.close()
# Fetch changes into the cloned repo
porcelain.fetch(target_path, self.repo.path,
outstream=outstream, errstream=errstream)
# Assert that fetch updated the local image of the remote
self.assert_correct_remote_refs(
target_repo.get_refs(), self.repo.get_refs())
# Check the target repo for pushed changes
with Repo(target_path) as r:
self.assertTrue(self.repo[b'HEAD'].id in r)
def test_with_remote_name(self):
remote_name = b'origin'
outstream = BytesIO()
errstream = BytesIO()
# create a file for initial commit
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test',
author=b'test <email>',
committer=b'test <email>')
# Setup target repo
target_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, target_path)
target_repo = porcelain.clone(self.repo.path, target=target_path,
errstream=errstream)
# Capture current refs
target_refs = target_repo.get_refs()
# create a second file to be pushed
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test2',
author=b'test2 <email>',
committer=b'test2 <email>')
self.assertFalse(self.repo[b'HEAD'].id in target_repo)
target_repo.close()
# Fetch changes into the cloned repo
porcelain.fetch(target_path, self.repo.path, remote_name=remote_name,
outstream=outstream, errstream=errstream)
# Assert that fetch updated the local image of the remote
self.assert_correct_remote_refs(
target_repo.get_refs(), self.repo.get_refs())
# Check the target repo for pushed changes, as well as updates
# for the refs
with Repo(target_path) as r:
self.assertTrue(self.repo[b'HEAD'].id in r)
self.assertNotEqual(self.repo.get_refs(), target_refs)
def assert_correct_remote_refs(
self, local_refs, remote_refs, remote_name=b'origin'):
local_ref_prefix = b'refs/heads'
remote_ref_prefix = b'refs/remotes/' + remote_name
locally_known_remote_refs = {
k[len(remote_ref_prefix) + 1:]: v for k, v in local_refs.items()
if k.startswith(remote_ref_prefix)}
normalized_remote_refs = {
k[len(local_ref_prefix) + 1:]: v for k, v in remote_refs.items()
if k.startswith(local_ref_prefix)}
self.assertEqual(locally_known_remote_refs, normalized_remote_refs)
class RepackTests(PorcelainTestCase):
def test_empty(self):
porcelain.repack(self.repo)
def test_simple(self):
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.repack(self.repo)
class LsTreeTests(PorcelainTestCase):
def test_empty(self):
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
f = StringIO()
porcelain.ls_tree(self.repo, b"HEAD", outstream=f)
self.assertEqual(f.getvalue(), "")
def test_simple(self):
# Commit a dummy file
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write('origstuff')
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
f = StringIO()
porcelain.ls_tree(self.repo, b"HEAD", outstream=f)
self.assertEqual(
f.getvalue(),
'100644 blob 8b82634d7eae019850bb883f06abf428c58bc9aa\tfoo\n')
def test_recursive(self):
# Create a directory then write a dummy file in it
dirpath = os.path.join(self.repo.path, 'adir')
filepath = os.path.join(dirpath, 'afile')
os.mkdir(dirpath)
with open(filepath, 'w') as f:
f.write('origstuff')
porcelain.add(repo=self.repo.path, paths=[filepath])
porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
f = StringIO()
porcelain.ls_tree(self.repo, b"HEAD", outstream=f)
self.assertEqual(
f.getvalue(),
'40000 tree b145cc69a5e17693e24d8a7be0016ed8075de66d\tadir\n')
f = StringIO()
porcelain.ls_tree(self.repo, b"HEAD", outstream=f, recursive=True)
self.assertEqual(
f.getvalue(),
'40000 tree b145cc69a5e17693e24d8a7be0016ed8075de66d\tadir\n'
'100644 blob 8b82634d7eae019850bb883f06abf428c58bc9aa\tadir'
'/afile\n')
class LsRemoteTests(PorcelainTestCase):
def test_empty(self):
self.assertEqual({}, porcelain.ls_remote(self.repo.path))
def test_some(self):
cid = porcelain.commit(repo=self.repo.path, message=b'test status',
author=b'author <email>',
committer=b'committer <email>')
self.assertEqual({
b'refs/heads/master': cid,
b'HEAD': cid},
porcelain.ls_remote(self.repo.path))
class LsFilesTests(PorcelainTestCase):
def test_empty(self):
self.assertEqual([], list(porcelain.ls_files(self.repo)))
def test_simple(self):
        # Write and stage a dummy file
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write('origstuff')
porcelain.add(repo=self.repo.path, paths=[fullpath])
self.assertEqual([b'foo'], list(porcelain.ls_files(self.repo)))
class RemoteAddTests(PorcelainTestCase):
def test_new(self):
porcelain.remote_add(
self.repo, 'jelmer', 'git://jelmer.uk/code/dulwich')
c = self.repo.get_config()
self.assertEqual(
c.get((b'remote', b'jelmer'), b'url'),
b'git://jelmer.uk/code/dulwich')
def test_exists(self):
porcelain.remote_add(
self.repo, 'jelmer', 'git://jelmer.uk/code/dulwich')
self.assertRaises(porcelain.RemoteExists, porcelain.remote_add,
self.repo, 'jelmer', 'git://jelmer.uk/code/dulwich')
class CheckIgnoreTests(PorcelainTestCase):
def test_check_ignored(self):
with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
f.write('foo')
foo_path = os.path.join(self.repo.path, 'foo')
with open(foo_path, 'w') as f:
f.write('BAR')
bar_path = os.path.join(self.repo.path, 'bar')
with open(bar_path, 'w') as f:
f.write('BAR')
self.assertEqual(
['foo'],
list(porcelain.check_ignore(self.repo, [foo_path])))
self.assertEqual(
[], list(porcelain.check_ignore(self.repo, [bar_path])))
def test_check_added_abs(self):
path = os.path.join(self.repo.path, 'foo')
with open(path, 'w') as f:
f.write('BAR')
self.repo.stage(['foo'])
with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
f.write('foo\n')
self.assertEqual(
[], list(porcelain.check_ignore(self.repo, [path])))
self.assertEqual(
['foo'],
list(porcelain.check_ignore(self.repo, [path], no_index=True)))
def test_check_added_rel(self):
with open(os.path.join(self.repo.path, 'foo'), 'w') as f:
f.write('BAR')
self.repo.stage(['foo'])
with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
f.write('foo\n')
cwd = os.getcwd()
os.mkdir(os.path.join(self.repo.path, 'bar'))
os.chdir(os.path.join(self.repo.path, 'bar'))
try:
self.assertEqual(
list(porcelain.check_ignore(self.repo, ['../foo'])), [])
self.assertEqual(['../foo'], list(
porcelain.check_ignore(self.repo, ['../foo'], no_index=True)))
finally:
os.chdir(cwd)
class UpdateHeadTests(PorcelainTestCase):
def test_set_to_branch(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo.refs[b"refs/heads/blah"] = c1.id
porcelain.update_head(self.repo, "blah")
self.assertEqual(c1.id, self.repo.head())
self.assertEqual(b'ref: refs/heads/blah',
self.repo.refs.read_ref(b'HEAD'))
def test_set_to_branch_detached(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo.refs[b"refs/heads/blah"] = c1.id
porcelain.update_head(self.repo, "blah", detached=True)
self.assertEqual(c1.id, self.repo.head())
self.assertEqual(c1.id, self.repo.refs.read_ref(b'HEAD'))
def test_set_to_commit_detached(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo.refs[b"refs/heads/blah"] = c1.id
porcelain.update_head(self.repo, c1.id, detached=True)
self.assertEqual(c1.id, self.repo.head())
self.assertEqual(c1.id, self.repo.refs.read_ref(b'HEAD'))
def test_set_new_branch(self):
[c1] = build_commit_graph(self.repo.object_store, [[1]])
self.repo.refs[b"refs/heads/blah"] = c1.id
porcelain.update_head(self.repo, "blah", new_branch="bar")
self.assertEqual(c1.id, self.repo.head())
self.assertEqual(b'ref: refs/heads/bar',
self.repo.refs.read_ref(b'HEAD'))
class MailmapTests(PorcelainTestCase):
def test_no_mailmap(self):
self.assertEqual(
b'Jelmer Vernooij <jelmer@samba.org>',
porcelain.check_mailmap(
self.repo, b'Jelmer Vernooij <jelmer@samba.org>'))
def test_mailmap_lookup(self):
with open(os.path.join(self.repo.path, '.mailmap'), 'wb') as f:
f.write(b"""\
Jelmer Vernooij <jelmer@debian.org>
""")
self.assertEqual(
b'Jelmer Vernooij <jelmer@debian.org>',
porcelain.check_mailmap(
self.repo, b'Jelmer Vernooij <jelmer@samba.org>'))
class FsckTests(PorcelainTestCase):
def test_none(self):
self.assertEqual(
[],
list(porcelain.fsck(self.repo)))
def test_git_dir(self):
obj = Tree()
a = Blob()
a.data = b"foo"
obj.add(b".git", 0o100644, a.id)
self.repo.object_store.add_objects(
[(a, None), (obj, None)])
self.assertEqual(
[(obj.id, 'invalid name .git')],
[(sha, str(e)) for (sha, e) in porcelain.fsck(self.repo)])
class DescribeTests(PorcelainTestCase):
def test_no_commits(self):
self.assertRaises(KeyError, porcelain.describe, self.repo.path)
def test_single_commit(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
porcelain.add(repo=self.repo.path, paths=[fullpath])
sha = porcelain.commit(
self.repo.path, message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>")
self.assertEqual(
'g{}'.format(sha[:7].decode('ascii')),
porcelain.describe(self.repo.path))
def test_tag(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(
self.repo.path, message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>")
porcelain.tag_create(self.repo.path, b"tryme", b'foo <foo@bar.com>',
b'bar', annotated=True)
self.assertEqual(
"tryme",
porcelain.describe(self.repo.path))
def test_tag_and_commit(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(
self.repo.path, message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>")
porcelain.tag_create(self.repo.path, b"tryme", b'foo <foo@bar.com>',
b'bar', annotated=True)
with open(fullpath, 'w') as f:
f.write("BAR2")
porcelain.add(repo=self.repo.path, paths=[fullpath])
sha = porcelain.commit(
self.repo.path, message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>")
self.assertEqual(
'tryme-1-g{}'.format(sha[:7].decode('ascii')),
porcelain.describe(self.repo.path))
class HelperTests(PorcelainTestCase):
def test_path_to_tree_path_base(self):
self.assertEqual(
b'bar', porcelain.path_to_tree_path('/home/foo', '/home/foo/bar'))
self.assertEqual(b'bar', porcelain.path_to_tree_path('.', './bar'))
self.assertEqual(b'bar', porcelain.path_to_tree_path('.', 'bar'))
cwd = os.getcwd()
self.assertEqual(
b'bar', porcelain.path_to_tree_path('.', os.path.join(cwd, 'bar')))
self.assertEqual(b'bar', porcelain.path_to_tree_path(cwd, 'bar'))
def test_path_to_tree_path_syntax(self):
self.assertEqual(b'bar', porcelain.path_to_tree_path(b'.', './bar'))
self.assertEqual(b'bar', porcelain.path_to_tree_path('.', b'./bar'))
self.assertEqual(b'bar', porcelain.path_to_tree_path(b'.', b'./bar'))
def test_path_to_tree_path_error(self):
with self.assertRaises(ValueError):
porcelain.path_to_tree_path('/home/foo/', '/home/bar/baz')
def test_path_to_tree_path_rel(self):
cwd = os.getcwd()
os.mkdir(os.path.join(self.repo.path, 'foo'))
os.mkdir(os.path.join(self.repo.path, 'foo/bar'))
try:
os.chdir(os.path.join(self.repo.path, 'foo/bar'))
self.assertEqual(b'bar/baz', porcelain.path_to_tree_path(
'..', 'baz'))
self.assertEqual(b'bar/baz', porcelain.path_to_tree_path(
os.path.join(os.getcwd(), '..'),
os.path.join(os.getcwd(), 'baz')))
self.assertEqual(b'bar/baz', porcelain.path_to_tree_path(
'..', os.path.join(os.getcwd(), 'baz')))
self.assertEqual(b'bar/baz', porcelain.path_to_tree_path(
os.path.join(os.getcwd(), '..'), 'baz'))
finally:
os.chdir(cwd)
class GetObjectBypathTests(PorcelainTestCase):
def test_simple(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
porcelain.add(repo=self.repo.path, paths=[fullpath])
porcelain.commit(
self.repo.path, message=b"Some message",
author=b"Joe <joe@example.com>",
committer=b"Bob <bob@example.com>")
self.assertEqual(
b"BAR",
porcelain.get_object_by_path(self.repo, 'foo').data)
def test_missing(self):
self.assertRaises(
KeyError,
porcelain.get_object_by_path, self.repo, 'foo')
class WriteTreeTests(PorcelainTestCase):
def test_simple(self):
fullpath = os.path.join(self.repo.path, 'foo')
with open(fullpath, 'w') as f:
f.write("BAR")
porcelain.add(repo=self.repo.path, paths=[fullpath])
self.assertEqual(
b'd2092c8a9f311f0311083bf8d177f2ca0ab5b241',
porcelain.write_tree(self.repo))
| true
| true
|
1c461aa2e5f63fa27680aa6cf11215cb8e9c8883
| 1,802
|
py
|
Python
|
rllib/examples/export/onnx_torch.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 22
|
2018-05-08T05:52:34.000Z
|
2020-04-01T10:09:55.000Z
|
rllib/examples/export/onnx_torch.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 73
|
2021-09-25T07:11:39.000Z
|
2022-03-26T07:10:59.000Z
|
rllib/examples/export/onnx_torch.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 10
|
2018-04-27T10:50:59.000Z
|
2020-02-24T02:41:43.000Z
|
from distutils.version import LooseVersion
import numpy as np
import ray
import ray.rllib.agents.ppo as ppo
import onnxruntime
import os
import shutil
import torch
# Configure our PPO trainer
config = ppo.DEFAULT_CONFIG.copy()
config["num_gpus"] = 0
config["num_workers"] = 1
config["framework"] = "torch"
outdir = "export_torch"
if os.path.exists(outdir):
shutil.rmtree(outdir)
np.random.seed(1234)
# We will run inference with this test batch
test_data = {
"obs": np.random.uniform(0, 1.0, size=(10, 4)).astype(np.float32),
"state_ins": np.array([0.0], dtype=np.float32),
}
# Start Ray and initialize a PPO trainer
ray.init()
trainer = ppo.PPOTrainer(config=config, env="CartPole-v0")
# You could train the model here
# trainer.train()
# Let's run inference on the torch model
policy = trainer.get_policy()
result_pytorch, _ = policy.model(
{
"obs": torch.tensor(test_data["obs"]),
}
)
# Evaluate tensor to fetch numpy array
result_pytorch = result_pytorch.detach().numpy()
# This line will export the model to ONNX
res = trainer.export_policy_model(outdir, onnx=11)
# Import ONNX model
exported_model_file = os.path.join(outdir, "model.onnx")
# Start an inference session for the ONNX model
session = onnxruntime.InferenceSession(exported_model_file, None)
# Pass the same test batch to the ONNX model
if LooseVersion(torch.__version__) < LooseVersion("1.9.0"):
# In torch < 1.9.0 the second input/output name gets mixed up
test_data["state_outs"] = test_data.pop("state_ins")
result_onnx = session.run(["output"], test_data)
# These results should be equal!
print("PYTORCH", result_pytorch)
print("ONNX", result_onnx)
assert np.allclose(result_pytorch, result_onnx), "Model outputs are NOT equal. FAILED"
print("Model outputs are equal. PASSED")
| 26.115942
| 86
| 0.736404
|
from distutils.version import LooseVersion
import numpy as np
import ray
import ray.rllib.agents.ppo as ppo
import onnxruntime
import os
import shutil
import torch
config = ppo.DEFAULT_CONFIG.copy()
config["num_gpus"] = 0
config["num_workers"] = 1
config["framework"] = "torch"
outdir = "export_torch"
if os.path.exists(outdir):
shutil.rmtree(outdir)
np.random.seed(1234)
test_data = {
"obs": np.random.uniform(0, 1.0, size=(10, 4)).astype(np.float32),
"state_ins": np.array([0.0], dtype=np.float32),
}
ray.init()
trainer = ppo.PPOTrainer(config=config, env="CartPole-v0")
policy = trainer.get_policy()
result_pytorch, _ = policy.model(
{
"obs": torch.tensor(test_data["obs"]),
}
)
# Evaluate tensor to fetch numpy array
result_pytorch = result_pytorch.detach().numpy()
# This line will export the model to ONNX
res = trainer.export_policy_model(outdir, onnx=11)
# Import ONNX model
exported_model_file = os.path.join(outdir, "model.onnx")
# Start an inference session for the ONNX model
session = onnxruntime.InferenceSession(exported_model_file, None)
# Pass the same test batch to the ONNX model
if LooseVersion(torch.__version__) < LooseVersion("1.9.0"):
# In torch < 1.9.0 the second input/output name gets mixed up
test_data["state_outs"] = test_data.pop("state_ins")
result_onnx = session.run(["output"], test_data)
# These results should be equal!
print("PYTORCH", result_pytorch)
print("ONNX", result_onnx)
assert np.allclose(result_pytorch, result_onnx), "Model outputs are NOT equal. FAILED"
print("Model outputs are equal. PASSED")
| true
| true
|
1c461b183b4ab4d591ec0f8eb4bc1dd4b40c8651
| 152
|
py
|
Python
|
webapp/urls.py
|
knschuckmann/Django_tableview
|
1b874baf96fc72756e63f9c4178465c7064b9465
|
[
"Apache-2.0"
] | null | null | null |
webapp/urls.py
|
knschuckmann/Django_tableview
|
1b874baf96fc72756e63f9c4178465c7064b9465
|
[
"Apache-2.0"
] | null | null | null |
webapp/urls.py
|
knschuckmann/Django_tableview
|
1b874baf96fc72756e63f9c4178465c7064b9465
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.urls import path
from webapp import views
urlpatterns = [
path('', views.TableView.as_view(), name='webapp'),
]
| 21.714286
| 55
| 0.664474
|
from django.urls import path
from webapp import views
urlpatterns = [
path('', views.TableView.as_view(), name='webapp'),
]
| true
| true
|
1c461c15867001aca948defb8fbac5a5e9fb967f
| 11,442
|
py
|
Python
|
tests/Demo.py
|
adityasingh177/trusted-compute-framework
|
b91410f6da21ba4d7458dd02048a447bcd4fed5a
|
[
"Apache-2.0"
] | null | null | null |
tests/Demo.py
|
adityasingh177/trusted-compute-framework
|
b91410f6da21ba4d7458dd02048a447bcd4fed5a
|
[
"Apache-2.0"
] | null | null | null |
tests/Demo.py
|
adityasingh177/trusted-compute-framework
|
b91410f6da21ba4d7458dd02048a447bcd4fed5a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import argparse
import random
import json
import logging
from service_client.generic import GenericServiceClient
import crypto.crypto as crypto
import utility.signature as signature
import worker.worker_details as worker
from shared_kv.shared_kv_interface import KvStorage
import utility.utility as enclave_helper
import utility.file_utils as futils
from error_code.error_status import SignatureStatus, WorkOrderStatus
TCFHOME = os.environ.get("TCF_HOME", "../../")
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------
# -----------------------------------------------------------------
def LocalMain(config):
if not input_json_str and not input_json_dir:
logger.error("JSON input file is not provided")
exit(1)
if not output_json_file_name:
logger.error("JSON output file is not provided")
exit(1)
if not server_uri:
logger.error("Server URI is not provided")
exit(1)
logger.info("Execute work order")
uri_client = GenericServiceClient(server_uri)
response = None
wo_id = None
if input_json_dir:
directory = os.fsencode(input_json_dir)
files = os.listdir(directory)
for file in sorted(files):
logger.info("---------------Input file name: %s ---------------\n",
file.decode("utf-8"))
input_json_str1 = futils.read_json_file((directory.decode("utf-8") + file.decode("utf-8")))
# -----------------------------------------------------------------
# If Client request is WorkOrderSubmit, a requester payload's
# signature with the requester private signing key is generated.
if "WorkOrderSubmit" in input_json_str1:
# Update workOrderId , workerId and workloadId
input_json_obj = json.loads(input_json_str1)
wo_id = hex(random.randint(1, 2**64 - 1))
input_json_obj["params"]["workOrderId"] = wo_id
input_json_obj["params"]["workerId"] = worker_obj.worker_id
# Convert workloadId to a hex string and update the request
workload_id = input_json_obj["params"]["workloadId"]
workload_id_hex = workload_id.encode("UTF-8").hex()
input_json_obj["params"]["workloadId"] = workload_id_hex
input_json_str1 = json.dumps(input_json_obj)
                # Generate session iv and an encrypted session key
session_iv = enclave_helper.generate_iv()
session_key = enclave_helper.generate_key()
encrypted_session_key = enclave_helper.generate_encrypted_key(session_key,
worker_obj.encryption_key)
input_json_str1, status = sig_obj.generate_client_signature(input_json_str1,
worker_obj, private_key, session_key, session_iv,
encrypted_session_key)
if status != SignatureStatus.PASSED:
logger.info("Generate signature failed\n")
exit(1)
if input_json_str1 is None:
continue
# -----------------------------------------------------------------
# Update the worker ID
if response:
if "workerId" in input_json_str1:
# Retrieve the worker id from the "WorkerRetrieve"
# response and update the worker id information for
# further json requests.
if "result" in response and "ids" in response["result"].keys():
input_json_final = json.loads(input_json_str1)
worker_id = response["result"]["ids"][0]
input_json_final["params"]["workerId"] = worker_id
input_json_str1 = json.dumps(input_json_final)
logger.info("**********Worker details Updated with "
"Worker ID*********\n%s\n", input_json_str1)
# -----------------------------------------------------------------
if "WorkOrderGetResult" in input_json_str1 or "WorkOrderReceiptRetrieve":
input_json_obj = json.loads(input_json_str1)
input_json_obj["params"]["workOrderId"] = wo_id
input_json_str1 = json.dumps(input_json_obj)
logger.info("*********Request Json********* \n%s\n", input_json_str1)
response = uri_client._postmsg(input_json_str1)
logger.info("**********Received Response*********\n%s\n", response)
# -----------------------------------------------------------------
# Worker details are loaded into Worker_Obj
if "WorkerRetrieve" in input_json_str1 and "result" in response:
worker_obj.load_worker(response)
# -----------------------------------------------------------------
# Polling for the "WorkOrderGetResult" and break when you get the result
while("WorkOrderGetResult" in input_json_str1 and "result" not in response):
if response["error"]["code"] != WorkOrderStatus.PENDING:
break
response = uri_client._postmsg(input_json_str1)
logger.info("Received Response : %s, \n \n ", response)
time.sleep(3)
# -----------------------------------------------------------------
# Verify the signature
if ("WorkOrderGetResult" in input_json_str1):
if "error" in response:
# Response has error, hence skip Signature verification
logger.info("Work order response has error, "
"skipping signature verification")
continue
sig_bool = sig_obj.verify_signature(response, worker_obj.verification_key)
try:
if sig_bool > 0:
logger.info("Signature Verified")
enclave_helper.decrypted_response(response,
session_key, session_iv)
else:
logger.info("Signature verification Failed")
exit(1)
except:
logger.error("ERROR: Failed to analyze Signature Verification")
exit(1)
# -----------------------------------------------------------------
else:
logger.info("Input Request %s", input_json_str)
response = uri_client._postmsg(input_json_str)
logger.info("Received Response : %s , \n \n ", response)
exit(0)
# -----------------------------------------------------------------------------
def ParseCommandLine(config, args):
logger.info('***************** TRUSTED COMPUTE FRAMEWORK (TCF)*****************')
global input_json_str
global input_json_dir
global server_uri
global output_json_file_name
global consensus_file_name
global sig_obj
global worker_obj
global private_key
global encrypted_session_key
global session_iv
parser = argparse.ArgumentParser()
parser.add_argument("--logfile", help="Name of the log file, __screen__ for standard output", type=str)
parser.add_argument("-p", "--private_key",
help="Private Key of the Client", type=str, default=None)
parser.add_argument("--loglevel", help="Logging level", type=str)
parser.add_argument("-i", "--input_file", help="JSON input file name", type=str, default="input.json")
parser.add_argument("--input_dir", help="Logging level", type=str, default=[])
parser.add_argument(
"-c", "--connect_uri", help="URI to send requests to", type=str, default=[])
parser.add_argument(
"output_file",
help="JSON output file name",
type=str,
default="output.json",
nargs="?")
options = parser.parse_args(args)
if config.get("Logging") is None:
config["Logging"] = {
"LogFile": "__screen__",
"LogLevel": "INFO"
}
if options.logfile:
config["Logging"]["LogFile"] = options.logfile
if options.loglevel:
config["Logging"]["LogLevel"] = options.loglevel.upper()
input_json_str = None
input_json_dir = None
if options.connect_uri:
server_uri = options.connect_uri
else:
logger.error("ERROR: Please enter the server URI")
if options.input_dir:
logger.info("Load Json Directory from %s", options.input_dir)
input_json_dir = options.input_dir
elif options.input_file:
try:
logger.info("load JSON input from %s", options.input_file)
with open(options.input_file, "r") as file:
input_json_str = file.read()
except:
logger.error("ERROR: Failed to read from file %s", options.input_file)
else:
logger.info("No input found")
if options.output_file:
output_json_file_name = options.output_file
else:
output_json_file_name = None
if options.private_key:
private_key = options.private_key
else:
# Generating the private Key for the client
private_key = enclave_helper.generate_signing_keys()
# Initializing Signature object, Worker Object
sig_obj = signature.ClientSignature()
worker_obj = worker.SGXWorkerDetails()
# -----------------------------------------------------------------------------
def Main(args=None):
import config.config as pconfig
import utility.logger as plogger
# parse out the configuration file first
conffiles = ["tcs_config.toml"]
confpaths = [".", TCFHOME + "/config", "../../etc"]
parser = argparse.ArgumentParser()
parser.add_argument("--config", help="configuration file", nargs="+")
parser.add_argument("--config-dir", help="configuration folder", nargs="+")
(options, remainder) = parser.parse_known_args(args)
if options.config:
conffiles = options.config
if options.config_dir:
confpaths = options.config_dir
try:
config = pconfig.parse_configuration_files(conffiles, confpaths)
json.dumps(config, indent=4)
except pconfig.ConfigurationException as e:
logger.error(str(e))
sys.exit(-1)
plogger.setup_loggers(config.get("Logging", {}))
sys.stdout = plogger.stream_to_logger(logging.getLogger("STDOUT"), logging.DEBUG)
sys.stderr = plogger.stream_to_logger(logging.getLogger("STDERR"), logging.WARN)
ParseCommandLine(config, remainder)
LocalMain(config)
# -----------------------------------------------------------------------------
Main()
| 40.718861
| 107
| 0.569743
|
import os
import sys
import time
import argparse
import random
import json
import logging
from service_client.generic import GenericServiceClient
import crypto.crypto as crypto
import utility.signature as signature
import worker.worker_details as worker
from shared_kv.shared_kv_interface import KvStorage
import utility.utility as enclave_helper
import utility.file_utils as futils
from error_code.error_status import SignatureStatus, WorkOrderStatus
TCFHOME = os.environ.get("TCF_HOME", "../../")
logger = logging.getLogger(__name__)
def LocalMain(config):
if not input_json_str and not input_json_dir:
logger.error("JSON input file is not provided")
exit(1)
if not output_json_file_name:
logger.error("JSON output file is not provided")
exit(1)
if not server_uri:
logger.error("Server URI is not provided")
exit(1)
logger.info("Execute work order")
uri_client = GenericServiceClient(server_uri)
response = None
wo_id = None
if input_json_dir:
directory = os.fsencode(input_json_dir)
files = os.listdir(directory)
for file in sorted(files):
logger.info("---------------Input file name: %s ---------------\n",
file.decode("utf-8"))
input_json_str1 = futils.read_json_file((directory.decode("utf-8") + file.decode("utf-8")))
# signature with the requester private signing key is generated.
if "WorkOrderSubmit" in input_json_str1:
# Update workOrderId , workerId and workloadId
input_json_obj = json.loads(input_json_str1)
wo_id = hex(random.randint(1, 2**64 - 1))
input_json_obj["params"]["workOrderId"] = wo_id
input_json_obj["params"]["workerId"] = worker_obj.worker_id
# Convert workloadId to a hex string and update the request
workload_id = input_json_obj["params"]["workloadId"]
workload_id_hex = workload_id.encode("UTF-8").hex()
input_json_obj["params"]["workloadId"] = workload_id_hex
input_json_str1 = json.dumps(input_json_obj)
                # Generate session iv and an encrypted session key
session_iv = enclave_helper.generate_iv()
session_key = enclave_helper.generate_key()
encrypted_session_key = enclave_helper.generate_encrypted_key(session_key,
worker_obj.encryption_key)
input_json_str1, status = sig_obj.generate_client_signature(input_json_str1,
worker_obj, private_key, session_key, session_iv,
encrypted_session_key)
if status != SignatureStatus.PASSED:
logger.info("Generate signature failed\n")
exit(1)
if input_json_str1 is None:
continue
# -----------------------------------------------------------------
# Update the worker ID
if response:
if "workerId" in input_json_str1:
# Retrieve the worker id from the "WorkerRetrieve"
# response and update the worker id information for
# further json requests.
if "result" in response and "ids" in response["result"].keys():
input_json_final = json.loads(input_json_str1)
worker_id = response["result"]["ids"][0]
input_json_final["params"]["workerId"] = worker_id
input_json_str1 = json.dumps(input_json_final)
logger.info("**********Worker details Updated with "
"Worker ID*********\n%s\n", input_json_str1)
# -----------------------------------------------------------------
if "WorkOrderGetResult" in input_json_str1 or "WorkOrderReceiptRetrieve":
input_json_obj = json.loads(input_json_str1)
input_json_obj["params"]["workOrderId"] = wo_id
input_json_str1 = json.dumps(input_json_obj)
logger.info("*********Request Json********* \n%s\n", input_json_str1)
response = uri_client._postmsg(input_json_str1)
logger.info("**********Received Response*********\n%s\n", response)
# -----------------------------------------------------------------
# Worker details are loaded into Worker_Obj
if "WorkerRetrieve" in input_json_str1 and "result" in response:
worker_obj.load_worker(response)
# -----------------------------------------------------------------
# Polling for the "WorkOrderGetResult" and break when you get the result
while("WorkOrderGetResult" in input_json_str1 and "result" not in response):
if response["error"]["code"] != WorkOrderStatus.PENDING:
break
response = uri_client._postmsg(input_json_str1)
logger.info("Received Response : %s, \n \n ", response)
time.sleep(3)
# -----------------------------------------------------------------
# Verify the signature
if ("WorkOrderGetResult" in input_json_str1):
if "error" in response:
# Response has error, hence skip Signature verification
logger.info("Work order response has error, "
"skipping signature verification")
continue
sig_bool = sig_obj.verify_signature(response, worker_obj.verification_key)
try:
if sig_bool > 0:
logger.info("Signature Verified")
enclave_helper.decrypted_response(response,
session_key, session_iv)
else:
logger.info("Signature verification Failed")
exit(1)
except:
logger.error("ERROR: Failed to analyze Signature Verification")
exit(1)
# -----------------------------------------------------------------
else:
logger.info("Input Request %s", input_json_str)
response = uri_client._postmsg(input_json_str)
logger.info("Received Response : %s , \n \n ", response)
exit(0)
# -----------------------------------------------------------------------------
def ParseCommandLine(config, args):
logger.info('***************** TRUSTED COMPUTE FRAMEWORK (TCF)*****************')
global input_json_str
global input_json_dir
global server_uri
global output_json_file_name
global consensus_file_name
global sig_obj
global worker_obj
global private_key
global encrypted_session_key
global session_iv
parser = argparse.ArgumentParser()
parser.add_argument("--logfile", help="Name of the log file, __screen__ for standard output", type=str)
parser.add_argument("-p", "--private_key",
help="Private Key of the Client", type=str, default=None)
parser.add_argument("--loglevel", help="Logging level", type=str)
parser.add_argument("-i", "--input_file", help="JSON input file name", type=str, default="input.json")
parser.add_argument("--input_dir", help="Logging level", type=str, default=[])
parser.add_argument(
"-c", "--connect_uri", help="URI to send requests to", type=str, default=[])
parser.add_argument(
"output_file",
help="JSON output file name",
type=str,
default="output.json",
nargs="?")
options = parser.parse_args(args)
if config.get("Logging") is None:
config["Logging"] = {
"LogFile": "__screen__",
"LogLevel": "INFO"
}
if options.logfile:
config["Logging"]["LogFile"] = options.logfile
if options.loglevel:
config["Logging"]["LogLevel"] = options.loglevel.upper()
input_json_str = None
input_json_dir = None
if options.connect_uri:
server_uri = options.connect_uri
else:
logger.error("ERROR: Please enter the server URI")
if options.input_dir:
logger.info("Load Json Directory from %s", options.input_dir)
input_json_dir = options.input_dir
elif options.input_file:
try:
logger.info("load JSON input from %s", options.input_file)
with open(options.input_file, "r") as file:
input_json_str = file.read()
except:
logger.error("ERROR: Failed to read from file %s", options.input_file)
else:
logger.info("No input found")
if options.output_file:
output_json_file_name = options.output_file
else:
output_json_file_name = None
if options.private_key:
private_key = options.private_key
else:
# Generating the private Key for the client
private_key = enclave_helper.generate_signing_keys()
# Initializing Signature object, Worker Object
sig_obj = signature.ClientSignature()
worker_obj = worker.SGXWorkerDetails()
# -----------------------------------------------------------------------------
def Main(args=None):
import config.config as pconfig
import utility.logger as plogger
# parse out the configuration file first
conffiles = ["tcs_config.toml"]
confpaths = [".", TCFHOME + "/config", "../../etc"]
parser = argparse.ArgumentParser()
parser.add_argument("--config", help="configuration file", nargs="+")
parser.add_argument("--config-dir", help="configuration folder", nargs="+")
(options, remainder) = parser.parse_known_args(args)
if options.config:
conffiles = options.config
if options.config_dir:
confpaths = options.config_dir
try:
config = pconfig.parse_configuration_files(conffiles, confpaths)
json.dumps(config, indent=4)
except pconfig.ConfigurationException as e:
logger.error(str(e))
sys.exit(-1)
plogger.setup_loggers(config.get("Logging", {}))
sys.stdout = plogger.stream_to_logger(logging.getLogger("STDOUT"), logging.DEBUG)
sys.stderr = plogger.stream_to_logger(logging.getLogger("STDERR"), logging.WARN)
ParseCommandLine(config, remainder)
LocalMain(config)
# -----------------------------------------------------------------------------
Main()
| true
| true
|
1c461c7ae39191873d06db62c17134524c45c945
| 16,111
|
py
|
Python
|
vstruct/defs/pcap.py
|
rnui2k/vivisect
|
b7b00f2d03defef28b4b8c912e3a8016e956c5f7
|
[
"ECL-2.0",
"Apache-2.0"
] | 716
|
2015-01-01T14:41:11.000Z
|
2022-03-28T06:51:50.000Z
|
vstruct/defs/pcap.py
|
rnui2k/vivisect
|
b7b00f2d03defef28b4b8c912e3a8016e956c5f7
|
[
"ECL-2.0",
"Apache-2.0"
] | 266
|
2015-01-01T15:07:27.000Z
|
2022-03-30T15:19:26.000Z
|
vstruct/defs/pcap.py
|
rnui2k/vivisect
|
b7b00f2d03defef28b4b8c912e3a8016e956c5f7
|
[
"ECL-2.0",
"Apache-2.0"
] | 159
|
2015-01-01T16:19:44.000Z
|
2022-03-21T21:55:34.000Z
|
import logging
import struct
import vstruct
import vstruct.defs.inet as vs_inet
from vstruct.primitives import *
logger = logging.getLogger(__name__)
PCAP_LINKTYPE_ETHER = 1
PCAP_LINKTYPE_RAW = 101
PCAP_LINKTYPE_LINUX_SLL = 113
PCAP_DLT_RAW = 12
PCAPNG_BOM = 0x1A2B3C4D
OPT_ENDOFOPT = 0
OPT_COMMENT = 1
#PCAPNG_BLOCKTYPE_SECTION_HEADER options
OPT_SHB_HARDWARE = 2
OPT_SHB_OS = 3
OPT_SHB_USERAPPL = 4
#PCAPNG_INTERFACE_DESCRIPTION_BLOCK options
OPT_IF_NAME = 2
OPT_IF_DESCRIPTION = 3
OPT_IF_IPV4ADDR = 4
OPT_IF_IPV6ADDR = 5
OPT_IF_MACADDR = 6
OPT_IF_EUIADDR = 7
OPT_IF_SPEED = 8
OPT_IF_TSRESOL = 9
OPT_IF_TZONE = 10
OPT_IF_FILTER = 11
OPT_IF_OS = 12
OPT_IF_FCSLEN = 13
OPT_IF_TSOFFSET = 14
# options for PCAPNG_ENHANCED_PACKET_BLOCK
OPT_EPB_FLAGS = 2
OPT_EPB_HASH = 3
OPT_EPB_DROPCOUNT = 4
# values used in the blocktype field
PCAPNG_BLOCKTYPE_INTERFACE_DESCRIPTION = 0x00000001
PCAPNG_BLOCKTYPE_PACKET = 0x00000002
PCAPNG_BLOCKTYPE_SIMPLE_PACKET = 0x00000003
PCAPNG_BLOCKTYPE_NAME_RESOLUTION = 0x00000004
PCAPNG_BLOCKTYPE_INTERFACE_STATS = 0x00000005
PCAPNG_BLOCKTYPE_ENHANCED_PACKET = 0x00000006
PCAPNG_BLOCKTYPE_SECTION_HEADER = 0x0a0d0d0a
def pad4bytes(size):
if (size % 4) == 0:
return size
    return size + (4 - (size % 4))
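# Illustrative behaviour (comment added for clarity, not in the original
# source): pad4bytes(5) == 8 and pad4bytes(8) == 8. pcapng stores options and
# packet data padded to 32-bit boundaries, so parsed lengths are rounded up
# with this helper.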
class PCAP_FILE_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.magic = v_uint32()
self.vers_maj = v_uint16()
self.vers_min = v_uint16()
self.thiszone = v_uint32()
self.sigfigs = v_uint32()
self.snaplen = v_uint32()
self.linktype = v_uint32()
class PCAP_PACKET_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.tvsec = v_uint32()
self.tvusec = v_uint32()
self.caplen = v_uint32()
self.len = v_uint32()
class PCAPNG_GENERIC_BLOCK_HEADER(vstruct.VStruct):
'''
Used to read the block type & size when parsing the file
'''
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
class PCAPNG_BLOCK_PARENT(vstruct.VStruct):
'''
Used to inherit the weird parsing style where there's variable length
options at the end, followed by the duplicate block total length
'''
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
#non-vstruct field, set during checking BOM
self.bigend = False
def vsParse(self, bytez, offset=0):
startoff = offset
roff = vstruct.VStruct.vsParse(self, bytez, offset=offset)
#(blocksize-4): because we still need the trailing blocksize2
# apparently blocks can completely omit the options list and not
# even have the OPT_ENDOFOPT entry
while (roff < len(bytez)) and ((roff-startoff) < (self.blocksize-4)):
opt = PCAPNG_OPTION(bigend=self.bigend)
roff = opt.vsParse(bytez, roff)
if opt.code == OPT_ENDOFOPT:
break
self.options.vsAddElement(opt)
# append trailing blocksize2
bs2 = v_uint32(bigend=self.bigend)
self.vsAddField('blocksize2', bs2)
roff = bs2.vsParse(bytez, roff)
#pad, plus we skip
return pad4bytes(roff)
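# On-disk layout handled by PCAPNG_BLOCK_PARENT above (per the pcapng spec):
#   [block type][total length][fixed fields][options ...][total length again]
# Each subclass declares the fixed fields; the shared vsParse() then walks the
# option list and the trailing duplicate length, padding offsets to 4 bytes.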
class PCAPNG_SECTION_HEADER_BLOCK(PCAPNG_BLOCK_PARENT):
def __init__(self, bigend=False):
PCAPNG_BLOCK_PARENT.__init__(self, bigend)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
self.bom = v_uint32(bigend=bigend)
self.vers_maj = v_uint16(bigend=bigend)
self.vers_min = v_uint16(bigend=bigend)
self.sectionsize = v_uint64(bigend=bigend)
self.options = vstruct.VArray([])
        #blocksize2: dynamically added in vsParse()
#self.blocksize2 = v_uint32(bigend=bigend)
def pcb_bom(self):
bom = self.vsGetField('bom')
if self.bom == PCAPNG_BOM:
#if it matches, then the endian of bom is correct
self.bigend = bom._vs_bigend
else:
self.bigend = not bom._vs_bigend
class PCAPNG_OPTION(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.code = v_uint16(bigend=bigend)
self.optsize = v_uint16(bigend=bigend)
self.bytes = v_bytes(0)
def pcb_optsize(self):
size = pad4bytes(self.optsize)
self.vsGetField('bytes').vsSetLength(size)
class PCAPNG_INTERFACE_DESCRIPTION_BLOCK(PCAPNG_BLOCK_PARENT):
def __init__(self, bigend=False):
PCAPNG_BLOCK_PARENT.__init__(self, bigend)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
self.linktype = v_uint16(bigend=bigend)
self.reserved = v_uint16(bigend=bigend)
self.snaplen = v_uint32(bigend=bigend)
self.options = vstruct.VArray([])
        #blocksize2: dynamically added in vsParse()
#self.blocksize2 = v_uint32(bigend=bigend)
def vsParse(self, bytez, offset=0):
'''
We need the tsresol value to adjust timestamp values, so pull it
out here
'''
ret = PCAPNG_BLOCK_PARENT.vsParse(self, bytez, offset=0)
self.tsresol = None
#default offset is 0
self.tsoffset = 0
#sys.stderr.write('PCAPNG_INTERFACE_DESCRIPTION_BLOCK: searching options')
for i, opt in self.options:
if opt.code == OPT_IF_TSRESOL:
self.tsresol = ord(opt.bytes[0])
#sys.stderr.write('Got tsresol: 0x%x\n' % self.tsresol)
elif opt.code == OPT_IF_TSOFFSET:
fmt = '<Q'
if self.bigend:
fmt = '>Q'
self.tsoffset = struct.unpack_from(fmt, opt.bytes)[0]
#sys.stderr.write('Got tsoffset: 0x%x\n' % self.tsoffset)
return ret
class PCAPNG_ENHANCED_PACKET_BLOCK(PCAPNG_BLOCK_PARENT):
def __init__(self, bigend=False):
PCAPNG_BLOCK_PARENT.__init__(self, bigend)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
self.interfaceid = v_uint32(bigend=bigend)
self.tstamphi = v_uint32(bigend=bigend)
self.tstamplow = v_uint32(bigend=bigend)
self.caplen = v_uint32(bigend=bigend)
self.packetlen = v_uint32(bigend=bigend)
self.data = v_bytes(0)
self.options = vstruct.VArray([])
        #blocksize2: dynamically added in vsParse()
#self.blocksize2 = v_uint32(bigend=bigend)
def pcb_caplen(self):
size = pad4bytes(self.caplen)
self.vsGetField('data').vsSetLength(size)
def setPcapTimestamp(self, idb):
'''
Adds a libpcap compatible tvsec and tvusec fields, based on the pcapng timestamp
'''
#orange left off here
self.snaplen = idb.snaplen
tstamp = (self.tstamphi << 32) | self.tstamplow
scale = 1000000
if idb.tsresol is None:
            #if not set, the capture assumes 10^-6 (microsecond) resolution
pass
elif (0x80 & idb.tsresol) == 0:
# remaining bits are resolution, to a negative power of 10
scale = 10**(idb.tsresol & 0x7f)
else:
# remaining bits are resolution, to a negative power of 2
scale = 1 << (idb.tsresol & 0x7f)
self.tvsec = (tstamp / scale) + idb.tsoffset
self.tvusec = tstamp % scale
class PCAPNG_SIMPLE_PACKET_BLOCK(vstruct.VStruct):
'''
Note: no variable length options fields, so inheriting from vstruct directly
'''
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
self.packetlen = v_uint32(bigend=bigend)
self.data = v_bytes(0)
self.blocksize2 = v_uint32(bigend=bigend)
def pcb_blocksize(self):
self.caplen = pad4bytes(self.blocksize - 16)
self.vsGetField('data').vsSetLength(self.caplen)
def setPcapTimestamp(self, idb):
#no timestamp in this type of block :(
self.tvsec = idb.tsoffset
self.tvusec = 0
def iterPcapFileName(filename, reuse=False):
with open(filename, 'rb') as fd:
for x in iterPcapFile(fd, reuse=reuse):
yield x
def iterPcapFile(fd, reuse=False):
'''
Figure out if it's a tcpdump format, or pcapng
'''
h = PCAP_FILE_HEADER()
b = fd.read(len(h))
h.vsParse(b, fast=True)
fd.seek(0)
if h.magic == PCAPNG_BLOCKTYPE_SECTION_HEADER:
return _iterPcapNgFile(fd, reuse)
return _iterPcapFile(fd, reuse)
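# Illustrative usage (a sketch; 'capture.pcap' is a hypothetical file name):
#   for pkthdr, ipv4, transport, pdata in iterPcapFileName('capture.pcap'):
#       print(ipv4.proto, len(pdata))
# Each yielded tuple is (packet/block header, IPv4 header, TCP/UDP/ICMP header,
# payload bytes), as produced by the two generators below.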
def _iterPcapFile(fd, reuse=False):
h = PCAP_FILE_HEADER()
b = fd.read(len(h))
h.vsParse(b, fast=True)
linktype = h.linktype
if linktype not in (PCAP_LINKTYPE_ETHER, PCAP_LINKTYPE_RAW):
raise Exception('PCAP Link Type %d Not Supported Yet!' % linktype)
pkt = PCAP_PACKET_HEADER()
eII = vs_inet.ETHERII()
pktsize = len(pkt)
eIIsize = len(eII)
ipv4 = vs_inet.IPv4()
ipv4size = 20
tcp_hdr = vs_inet.TCP()
udp_hdr = vs_inet.UDP()
icmp_hdr = vs_inet.ICMP()
go = True
while go:
hdr = fd.read(pktsize)
if len(hdr) != pktsize:
break
pkt.vsParse(hdr, fast=True)
b = fd.read(pkt.caplen)
offset = 0
if linktype == PCAP_LINKTYPE_ETHER:
if len(b) < eIIsize:
continue
eII.vsParse(b, 0, fast=True)
# No support for non-ip protocol yet...
if eII.etype not in (vs_inet.ETH_P_IP,vs_inet.ETH_P_VLAN):
continue
offset += eIIsize
if eII.etype == vs_inet.ETH_P_VLAN:
offset += 4
elif linktype == PCAP_LINKTYPE_RAW:
pass
if not reuse:
ipv4 = vs_inet.IPv4()
if (len(b) - offset) < ipv4size:
continue
ipv4.vsParse(b, offset, fast=True)
# Make b *only* the IP datagram bytes...
b = b[offset:offset+ipv4.totlen]
offset = 0
offset += len(ipv4)
tsize = len(b) - offset
if ipv4.proto == vs_inet.IPPROTO_TCP:
if tsize < 20:
continue
if not reuse:
tcp_hdr = vs_inet.TCP()
tcp_hdr.vsParse(b, offset, fast=True)
offset += len(tcp_hdr)
pdata = b[offset:]
yield pkt,ipv4,tcp_hdr,pdata
elif ipv4.proto == vs_inet.IPPROTO_UDP:
if tsize < 8:
continue
if not reuse:
udp_hdr = vs_inet.UDP()
udp_hdr.vsParse(b, offset, fast=True)
offset += len(udp_hdr)
pdata = b[offset:]
yield pkt,ipv4,udp_hdr,pdata
elif ipv4.proto == vs_inet.IPPROTO_ICMP:
if tsize < 4:
continue
if not reuse:
icmp_hdr = vs_inet.ICMP()
icmp_hdr.vsParse(b, offset, fast=True)
offset += len(icmp_hdr)
pdata = b[offset:]
yield pkt,ipv4,icmp_hdr,pdata
else:
logger.warning('UNHANDLED IP PROTOCOL: %d', ipv4.proto)
def _iterPcapNgFile(fd, reuse=False):
header = PCAPNG_GENERIC_BLOCK_HEADER()
ifaceidx = 0
ifacedict = {}
roff = 0
bigend = False
curroff = fd.tell()
b0 = fd.read(len(header))
fd.seek(curroff)
while len(b0) == len(header):
header.vsParse(b0, fast=True)
body = fd.read(header.blocksize)
if header.blocktype == PCAPNG_BLOCKTYPE_SECTION_HEADER:
shb = PCAPNG_SECTION_HEADER_BLOCK()
roff = shb.vsParse(body)
bigend = shb.bigend
#reset interface stuff since we're in a new section
ifaceidx = 0
ifacedict = {}
elif header.blocktype == PCAPNG_BLOCKTYPE_INTERFACE_DESCRIPTION:
idb = PCAPNG_INTERFACE_DESCRIPTION_BLOCK(bigend)
roff = idb.vsParse(body)
#save off the interface for later reference
ifacedict[ifaceidx] = idb
ifaceidx += 1
elif header.blocktype == PCAPNG_BLOCKTYPE_SIMPLE_PACKET:
spb = PCAPNG_SIMPLE_PACKET_BLOCK(bigend)
roff = spb.vsParse(body)
tup = _parsePcapngPacketBytes(iface.linktype, spb)
if tup is not None:
#if it is None, just fall through & read next block
yield tup
elif header.blocktype == PCAPNG_BLOCKTYPE_ENHANCED_PACKET:
epb = PCAPNG_ENHANCED_PACKET_BLOCK(bigend)
roff = epb.vsParse(body)
iface = ifacedict.get(epb.interfaceid)
epb.setPcapTimestamp(iface)
tup = _parsePcapngPacketBytes(iface.linktype, epb)
if tup is not None:
#if tup is None, just fall through & read next block
yield tup
#TODO: other blocks needed?
#PCAPNG_BLOCKTYPE_PACKET (obsolete)
#PCAPNG_BLOCKTYPE_NAME_RESOLUTION:
#PCAPNG_BLOCKTYPE_INTERFACE_STATS:
else:
logger.warning('Unknown block type: 0x%08x: 0x%08x 0x%08x bytes', roff, header.blocktype, header.blocksize)
curroff = fd.tell()
b0 = fd.read(len(header))
fd.seek(curroff)
def _parsePcapngPacketBytes(linktype, pkt):
'''
pkt is either a parsed PCAPNG_SIMPLE_PACKET_BLOCK or PCAPNG_ENHANCED_PACKET_BLOCK
On success Returns tuple (pcapng_pkt, ipv4_vstruct, transport_vstruc, pdata)
Returns None if the packet can't be parsed
'''
if linktype not in (PCAP_LINKTYPE_ETHER, PCAP_LINKTYPE_RAW):
raise Exception('PCAP Link Type %d Not Supported Yet!' % linktype)
#pkt = PCAP_PACKET_HEADER()
eII = vs_inet.ETHERII()
eIIsize = len(eII)
offset = 0
if linktype == PCAP_LINKTYPE_ETHER:
if len(pkt.data) < eIIsize:
return None
eII.vsParse(pkt.data, 0, fast=True)
# No support for non-ip protocol yet...
if eII.etype not in (vs_inet.ETH_P_IP,vs_inet.ETH_P_VLAN):
return None
offset += eIIsize
if eII.etype == vs_inet.ETH_P_VLAN:
offset += 4
elif linktype == PCAP_LINKTYPE_RAW:
pass
ipv4 = vs_inet.IPv4()
if (len(pkt.data) - offset) < len(ipv4):
return None
ipv4.vsParse(pkt.data, offset, fast=True)
# Make b *only* the IP datagram bytes...
b = pkt.data[offset:offset+ipv4.totlen]
offset = 0
offset += len(ipv4)
tsize = len(b) - offset
if ipv4.proto == vs_inet.IPPROTO_TCP:
if tsize < 20:
return None
tcp_hdr = vs_inet.TCP()
tcp_hdr.vsParse(b, offset, fast=True)
offset += len(tcp_hdr)
pdata = b[offset:]
return pkt,ipv4,tcp_hdr,pdata
elif ipv4.proto == vs_inet.IPPROTO_UDP:
if tsize < 8:
return None
udp_hdr = vs_inet.UDP()
udp_hdr.vsParse(b, offset, fast=True)
offset += len(udp_hdr)
pdata = b[offset:]
return pkt,ipv4,udp_hdr,pdata
elif ipv4.proto == vs_inet.IPPROTO_ICMP:
if tsize < 4:
return None
icmp_hdr = vs_inet.ICMP()
icmp_hdr.vsParse(b, offset, fast=True)
offset += len(icmp_hdr)
pdata = b[offset:]
return pkt,ipv4,icmp_hdr,pdata
else:
logger.warning('UNHANDLED IP PROTOCOL: %d', ipv4.proto)
return None
| 32.547475
| 119
| 0.597232
|
import logging
import struct
import vstruct
import vstruct.defs.inet as vs_inet
from vstruct.primitives import *
logger = logging.getLogger(__name__)
PCAP_LINKTYPE_ETHER = 1
PCAP_LINKTYPE_RAW = 101
PCAP_LINKTYPE_LINUX_SLL = 113
PCAP_DLT_RAW = 12
PCAPNG_BOM = 0x1A2B3C4D
OPT_ENDOFOPT = 0
OPT_COMMENT = 1
OPT_SHB_HARDWARE = 2
OPT_SHB_OS = 3
OPT_SHB_USERAPPL = 4
OPT_IF_NAME = 2
OPT_IF_DESCRIPTION = 3
OPT_IF_IPV4ADDR = 4
OPT_IF_IPV6ADDR = 5
OPT_IF_MACADDR = 6
OPT_IF_EUIADDR = 7
OPT_IF_SPEED = 8
OPT_IF_TSRESOL = 9
OPT_IF_TZONE = 10
OPT_IF_FILTER = 11
OPT_IF_OS = 12
OPT_IF_FCSLEN = 13
OPT_IF_TSOFFSET = 14
OPT_EPB_FLAGS = 2
OPT_EPB_HASH = 3
OPT_EPB_DROPCOUNT = 4
PCAPNG_BLOCKTYPE_INTERFACE_DESCRIPTION = 0x00000001
PCAPNG_BLOCKTYPE_PACKET = 0x00000002
PCAPNG_BLOCKTYPE_SIMPLE_PACKET = 0x00000003
PCAPNG_BLOCKTYPE_NAME_RESOLUTION = 0x00000004
PCAPNG_BLOCKTYPE_INTERFACE_STATS = 0x00000005
PCAPNG_BLOCKTYPE_ENHANCED_PACKET = 0x00000006
PCAPNG_BLOCKTYPE_SECTION_HEADER = 0x0a0d0d0a
def pad4bytes(size):
if (size % 4) == 0:
return size
    return size + (4 - (size % 4))
class PCAP_FILE_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.magic = v_uint32()
self.vers_maj = v_uint16()
self.vers_min = v_uint16()
self.thiszone = v_uint32()
self.sigfigs = v_uint32()
self.snaplen = v_uint32()
self.linktype = v_uint32()
class PCAP_PACKET_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.tvsec = v_uint32()
self.tvusec = v_uint32()
self.caplen = v_uint32()
self.len = v_uint32()
class PCAPNG_GENERIC_BLOCK_HEADER(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
class PCAPNG_BLOCK_PARENT(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.bigend = False
def vsParse(self, bytez, offset=0):
startoff = offset
roff = vstruct.VStruct.vsParse(self, bytez, offset=offset)
while (roff < len(bytez)) and ((roff-startoff) < (self.blocksize-4)):
opt = PCAPNG_OPTION(bigend=self.bigend)
roff = opt.vsParse(bytez, roff)
if opt.code == OPT_ENDOFOPT:
break
self.options.vsAddElement(opt)
bs2 = v_uint32(bigend=self.bigend)
self.vsAddField('blocksize2', bs2)
roff = bs2.vsParse(bytez, roff)
return pad4bytes(roff)
class PCAPNG_SECTION_HEADER_BLOCK(PCAPNG_BLOCK_PARENT):
def __init__(self, bigend=False):
PCAPNG_BLOCK_PARENT.__init__(self, bigend)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
self.bom = v_uint32(bigend=bigend)
self.vers_maj = v_uint16(bigend=bigend)
self.vers_min = v_uint16(bigend=bigend)
self.sectionsize = v_uint64(bigend=bigend)
self.options = vstruct.VArray([])
def pcb_bom(self):
bom = self.vsGetField('bom')
if self.bom == PCAPNG_BOM:
self.bigend = bom._vs_bigend
else:
self.bigend = not bom._vs_bigend
class PCAPNG_OPTION(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.code = v_uint16(bigend=bigend)
self.optsize = v_uint16(bigend=bigend)
self.bytes = v_bytes(0)
def pcb_optsize(self):
size = pad4bytes(self.optsize)
self.vsGetField('bytes').vsSetLength(size)
class PCAPNG_INTERFACE_DESCRIPTION_BLOCK(PCAPNG_BLOCK_PARENT):
def __init__(self, bigend=False):
PCAPNG_BLOCK_PARENT.__init__(self, bigend)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
self.linktype = v_uint16(bigend=bigend)
self.reserved = v_uint16(bigend=bigend)
self.snaplen = v_uint32(bigend=bigend)
self.options = vstruct.VArray([])
def vsParse(self, bytez, offset=0):
ret = PCAPNG_BLOCK_PARENT.vsParse(self, bytez, offset=0)
self.tsresol = None
self.tsoffset = 0
for i, opt in self.options:
if opt.code == OPT_IF_TSRESOL:
self.tsresol = ord(opt.bytes[0])
elif opt.code == OPT_IF_TSOFFSET:
fmt = '<Q'
if self.bigend:
fmt = '>Q'
self.tsoffset = struct.unpack_from(fmt, opt.bytes)[0]
return ret
class PCAPNG_ENHANCED_PACKET_BLOCK(PCAPNG_BLOCK_PARENT):
def __init__(self, bigend=False):
PCAPNG_BLOCK_PARENT.__init__(self, bigend)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
self.interfaceid = v_uint32(bigend=bigend)
self.tstamphi = v_uint32(bigend=bigend)
self.tstamplow = v_uint32(bigend=bigend)
self.caplen = v_uint32(bigend=bigend)
self.packetlen = v_uint32(bigend=bigend)
self.data = v_bytes(0)
self.options = vstruct.VArray([])
def pcb_caplen(self):
size = pad4bytes(self.caplen)
self.vsGetField('data').vsSetLength(size)
def setPcapTimestamp(self, idb):
self.snaplen = idb.snaplen
tstamp = (self.tstamphi << 32) | self.tstamplow
scale = 1000000
if idb.tsresol is None:
pass
elif (0x80 & idb.tsresol) == 0:
scale = 10**(idb.tsresol & 0x7f)
else:
scale = 1 << (idb.tsresol & 0x7f)
self.tvsec = (tstamp / scale) + idb.tsoffset
self.tvusec = tstamp % scale
class PCAPNG_SIMPLE_PACKET_BLOCK(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
self.packetlen = v_uint32(bigend=bigend)
self.data = v_bytes(0)
self.blocksize2 = v_uint32(bigend=bigend)
def pcb_blocksize(self):
self.caplen = pad4bytes(self.blocksize - 16)
self.vsGetField('data').vsSetLength(self.caplen)
def setPcapTimestamp(self, idb):
self.tvsec = idb.tsoffset
self.tvusec = 0
def iterPcapFileName(filename, reuse=False):
with open(filename, 'rb') as fd:
for x in iterPcapFile(fd, reuse=reuse):
yield x
def iterPcapFile(fd, reuse=False):
h = PCAP_FILE_HEADER()
b = fd.read(len(h))
h.vsParse(b, fast=True)
fd.seek(0)
if h.magic == PCAPNG_BLOCKTYPE_SECTION_HEADER:
return _iterPcapNgFile(fd, reuse)
return _iterPcapFile(fd, reuse)
def _iterPcapFile(fd, reuse=False):
h = PCAP_FILE_HEADER()
b = fd.read(len(h))
h.vsParse(b, fast=True)
linktype = h.linktype
if linktype not in (PCAP_LINKTYPE_ETHER, PCAP_LINKTYPE_RAW):
raise Exception('PCAP Link Type %d Not Supported Yet!' % linktype)
pkt = PCAP_PACKET_HEADER()
eII = vs_inet.ETHERII()
pktsize = len(pkt)
eIIsize = len(eII)
ipv4 = vs_inet.IPv4()
ipv4size = 20
tcp_hdr = vs_inet.TCP()
udp_hdr = vs_inet.UDP()
icmp_hdr = vs_inet.ICMP()
go = True
while go:
hdr = fd.read(pktsize)
if len(hdr) != pktsize:
break
pkt.vsParse(hdr, fast=True)
b = fd.read(pkt.caplen)
offset = 0
if linktype == PCAP_LINKTYPE_ETHER:
if len(b) < eIIsize:
continue
eII.vsParse(b, 0, fast=True)
if eII.etype not in (vs_inet.ETH_P_IP,vs_inet.ETH_P_VLAN):
continue
offset += eIIsize
if eII.etype == vs_inet.ETH_P_VLAN:
offset += 4
elif linktype == PCAP_LINKTYPE_RAW:
pass
if not reuse:
ipv4 = vs_inet.IPv4()
if (len(b) - offset) < ipv4size:
continue
ipv4.vsParse(b, offset, fast=True)
b = b[offset:offset+ipv4.totlen]
offset = 0
offset += len(ipv4)
tsize = len(b) - offset
if ipv4.proto == vs_inet.IPPROTO_TCP:
if tsize < 20:
continue
if not reuse:
tcp_hdr = vs_inet.TCP()
tcp_hdr.vsParse(b, offset, fast=True)
offset += len(tcp_hdr)
pdata = b[offset:]
yield pkt,ipv4,tcp_hdr,pdata
elif ipv4.proto == vs_inet.IPPROTO_UDP:
if tsize < 8:
continue
if not reuse:
udp_hdr = vs_inet.UDP()
udp_hdr.vsParse(b, offset, fast=True)
offset += len(udp_hdr)
pdata = b[offset:]
yield pkt,ipv4,udp_hdr,pdata
elif ipv4.proto == vs_inet.IPPROTO_ICMP:
if tsize < 4:
continue
if not reuse:
icmp_hdr = vs_inet.ICMP()
icmp_hdr.vsParse(b, offset, fast=True)
offset += len(icmp_hdr)
pdata = b[offset:]
yield pkt,ipv4,icmp_hdr,pdata
else:
logger.warning('UNHANDLED IP PROTOCOL: %d', ipv4.proto)
def _iterPcapNgFile(fd, reuse=False):
header = PCAPNG_GENERIC_BLOCK_HEADER()
ifaceidx = 0
ifacedict = {}
roff = 0
bigend = False
curroff = fd.tell()
b0 = fd.read(len(header))
fd.seek(curroff)
while len(b0) == len(header):
header.vsParse(b0, fast=True)
body = fd.read(header.blocksize)
if header.blocktype == PCAPNG_BLOCKTYPE_SECTION_HEADER:
shb = PCAPNG_SECTION_HEADER_BLOCK()
roff = shb.vsParse(body)
bigend = shb.bigend
ifaceidx = 0
ifacedict = {}
elif header.blocktype == PCAPNG_BLOCKTYPE_INTERFACE_DESCRIPTION:
idb = PCAPNG_INTERFACE_DESCRIPTION_BLOCK(bigend)
roff = idb.vsParse(body)
#save off the interface for later reference
ifacedict[ifaceidx] = idb
ifaceidx += 1
elif header.blocktype == PCAPNG_BLOCKTYPE_SIMPLE_PACKET:
spb = PCAPNG_SIMPLE_PACKET_BLOCK(bigend)
roff = spb.vsParse(body)
tup = _parsePcapngPacketBytes(iface.linktype, spb)
if tup is not None:
#if it is None, just fall through & read next block
yield tup
elif header.blocktype == PCAPNG_BLOCKTYPE_ENHANCED_PACKET:
epb = PCAPNG_ENHANCED_PACKET_BLOCK(bigend)
roff = epb.vsParse(body)
iface = ifacedict.get(epb.interfaceid)
epb.setPcapTimestamp(iface)
tup = _parsePcapngPacketBytes(iface.linktype, epb)
if tup is not None:
#if tup is None, just fall through & read next block
yield tup
#TODO: other blocks needed?
#PCAPNG_BLOCKTYPE_PACKET (obsolete)
#PCAPNG_BLOCKTYPE_NAME_RESOLUTION:
#PCAPNG_BLOCKTYPE_INTERFACE_STATS:
else:
logger.warning('Unknown block type: 0x%08x: 0x%08x 0x%08x bytes', roff, header.blocktype, header.blocksize)
curroff = fd.tell()
b0 = fd.read(len(header))
fd.seek(curroff)
def _parsePcapngPacketBytes(linktype, pkt):
if linktype not in (PCAP_LINKTYPE_ETHER, PCAP_LINKTYPE_RAW):
raise Exception('PCAP Link Type %d Not Supported Yet!' % linktype)
#pkt = PCAP_PACKET_HEADER()
eII = vs_inet.ETHERII()
eIIsize = len(eII)
offset = 0
if linktype == PCAP_LINKTYPE_ETHER:
if len(pkt.data) < eIIsize:
return None
eII.vsParse(pkt.data, 0, fast=True)
# No support for non-ip protocol yet...
if eII.etype not in (vs_inet.ETH_P_IP,vs_inet.ETH_P_VLAN):
return None
offset += eIIsize
if eII.etype == vs_inet.ETH_P_VLAN:
offset += 4
elif linktype == PCAP_LINKTYPE_RAW:
pass
ipv4 = vs_inet.IPv4()
if (len(pkt.data) - offset) < len(ipv4):
return None
ipv4.vsParse(pkt.data, offset, fast=True)
# Make b *only* the IP datagram bytes...
b = pkt.data[offset:offset+ipv4.totlen]
offset = 0
offset += len(ipv4)
tsize = len(b) - offset
if ipv4.proto == vs_inet.IPPROTO_TCP:
if tsize < 20:
return None
tcp_hdr = vs_inet.TCP()
tcp_hdr.vsParse(b, offset, fast=True)
offset += len(tcp_hdr)
pdata = b[offset:]
return pkt,ipv4,tcp_hdr,pdata
elif ipv4.proto == vs_inet.IPPROTO_UDP:
if tsize < 8:
return None
udp_hdr = vs_inet.UDP()
udp_hdr.vsParse(b, offset, fast=True)
offset += len(udp_hdr)
pdata = b[offset:]
return pkt,ipv4,udp_hdr,pdata
elif ipv4.proto == vs_inet.IPPROTO_ICMP:
if tsize < 4:
return None
icmp_hdr = vs_inet.ICMP()
icmp_hdr.vsParse(b, offset, fast=True)
offset += len(icmp_hdr)
pdata = b[offset:]
return pkt,ipv4,icmp_hdr,pdata
else:
logger.warning('UNHANDLED IP PROTOCOL: %d', ipv4.proto)
return None
| true
| true
|
1c461db4bc60cf1e92582559dd48bd01ee94d6f7
| 456
|
py
|
Python
|
src/util/__init__.py
|
seahrh/coding-interview
|
517d19e7e88c02acec4aa6336bc20206ce3f1897
|
[
"MIT"
] | null | null | null |
src/util/__init__.py
|
seahrh/coding-interview
|
517d19e7e88c02acec4aa6336bc20206ce3f1897
|
[
"MIT"
] | null | null | null |
src/util/__init__.py
|
seahrh/coding-interview
|
517d19e7e88c02acec4aa6336bc20206ce3f1897
|
[
"MIT"
] | null | null | null |
from typing import Iterable
# skip mypy check because open issue https://github.com/python/typing/issues/760
def argmin(elements: Iterable) -> int:
"""Returns first index of smallest element."""
return min(enumerate(elements), key=lambda x: x[1])[0] # type: ignore
def argmax(elements: Iterable) -> int:
"""Returns first index of largest element."""
return max(enumerate(elements), key=lambda x: x[1])[0] # type: ignore
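# Illustrative examples (not part of the original module):
#   argmin([3, 1, 1]) == 1, argmax([3, 5, 5]) == 1
# Ties resolve to the first occurrence because enumerate() preserves input
# order and min()/max() return the first minimal/maximal item.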
| 32.571429
| 81
| 0.677632
|
from typing import Iterable
def argmin(elements: Iterable) -> int:
return min(enumerate(elements), key=lambda x: x[1])[0]
def argmax(elements: Iterable) -> int:
return max(enumerate(elements), key=lambda x: x[1])[0]
| true
| true
|
1c461e2d8f683c54e0e3cf71b790ddfb6dc91f8a
| 2,131
|
py
|
Python
|
opencv_disparity/test.py
|
salihmarangoz/StereoDepthEstimation
|
a068df34329ee0642b5eb4277dedcd7012d78b4d
|
[
"MIT"
] | null | null | null |
opencv_disparity/test.py
|
salihmarangoz/StereoDepthEstimation
|
a068df34329ee0642b5eb4277dedcd7012d78b4d
|
[
"MIT"
] | null | null | null |
opencv_disparity/test.py
|
salihmarangoz/StereoDepthEstimation
|
a068df34329ee0642b5eb4277dedcd7012d78b4d
|
[
"MIT"
] | null | null | null |
##################################################################################
# SOURCE: https://github.com/aliyasineser/stereoDepth/blob/master/stereo_depth.py
##################################################################################
import numpy as np
import cv2 as cv
import cv2
from matplotlib import pyplot as plt
def depth_map(imgL, imgR):
""" Depth map calculation. Works with SGBM and WLS. Need rectified images, returns depth map ( left to right disparity ) """
# SGBM Parameters -----------------
window_size = 3 # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely
left_matcher = cv2.StereoSGBM_create(
minDisparity=0,
numDisparities=12*16, # max_disp has to be dividable by 16 f. E. HH 192, 256
blockSize=window_size,
P1=8 * 5 * window_size,
# wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely
P2=32 * 5 * window_size,
disp12MaxDiff=12,
uniquenessRatio=10,
speckleWindowSize=50,
speckleRange=32,
preFilterCap=63,
mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY
)
right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)
# FILTER Parameters
lmbda = 80000
sigma = 1.3
visual_multiplier = 6
wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)
wls_filter.setLambda(lmbda)
wls_filter.setSigmaColor(sigma)
displ = left_matcher.compute(imgL, imgR) # .astype(np.float32)/16
dispr = right_matcher.compute(imgR, imgL) # .astype(np.float32)/16
displ = np.int16(displ)
dispr = np.int16(dispr)
filteredImg = wls_filter.filter(displ, imgL, None, dispr) # important to put "imgL" here!!!
filteredImg = cv2.normalize(src=filteredImg, dst=filteredImg, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX);
filteredImg = np.uint8(filteredImg)
return filteredImg
imgL = cv.imread('l.png',0)
imgR = cv.imread('r.png',0)
disparity = depth_map(imgL, imgR)
plt.imshow(disparity,'gray')
plt.show()
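# Hedged sketch: converting a raw (un-normalized) disparity map into metric depth.
# focal_px and baseline_m are illustrative placeholders; real values come from the
# stereo calibration, which this script does not perform.
def disparity_to_depth(disparity_px, focal_px=700.0, baseline_m=0.12):
    disparity_px = np.asarray(disparity_px, dtype=np.float32)
    depth_m = np.zeros_like(disparity_px)
    valid = disparity_px > 0
    depth_m[valid] = focal_px * baseline_m / disparity_px[valid]  # depth = f * B / d
    return depth_m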
| 38.745455
| 136
| 0.63679
| true
| true
|
|
1c461f5be0efef6234d9d0aa8c49ba9cdafb8ecd
| 10,102
|
py
|
Python
|
tests/unit/fs.py
|
ach3/fibratus
|
655f0e6cee88caff4f75488fd90bf1bb00693847
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/fs.py
|
ach3/fibratus
|
655f0e6cee88caff4f75488fd90bf1bb00693847
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/fs.py
|
ach3/fibratus
|
655f0e6cee88caff4f75488fd90bf1bb00693847
|
[
"Apache-2.0"
] | 1
|
2022-03-07T08:05:34.000Z
|
2022-03-07T08:05:34.000Z
|
# Copyright 2015 by Nedim Sabic (RabbitStack)
# All Rights Reserved.
# http://rabbitstack.github.io
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest.mock import Mock
import pytest
from fibratus.common import DotD as dd, NA
from fibratus.fs import FsIO, FileOps
from fibratus.handle import HandleInfo, HandleType
from fibratus.kevent import KEvent
from fibratus.kevent_types import CREATE_FILE, DELETE_FILE, WRITE_FILE, RENAME_FILE, SET_FILE_INFORMATION
from fibratus.thread import ThreadRegistry
@pytest.fixture(scope='module')
def kevent():
return KEvent(Mock(spec_set=ThreadRegistry))
@pytest.fixture(scope='module')
def fsio(kevent):
handles = [HandleInfo(3080, 18446738026482168384, HandleType.DIRECTORY,
"\\Device\\HarddiskVolume2\\Users\\Nedo\\AppData\\Local\\VirtualStore", 640),
HandleInfo(2010, 18446738023471035392, HandleType.FILE,
"\\Device\\HarddiskVolume2\\Windows\\system32\\rpcss.dll", 640)]
fsio = FsIO(kevent, handles)
fsio.file_pool[18446738026474426144] = '\\Device\\HarddiskVolume2\\fibratus.log'
return fsio
class TestFsIO():
def test_init_fsio(self, fsio):
assert len(fsio.file_handles) == 2
@pytest.mark.parametrize('expected_op, kfsio',
[(FileOps.SUPERSEDE, dd({"file_object": 18446738026482168384, "ttid": 1484,
"process_id": 859,
"create_options": 1223456,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 1, "file_attributes": 0})),
(FileOps.OPEN, dd({"file_object": 18446738026482168384, "ttid": 1484, "process_id": 859,
"create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 2, "file_attributes": 0})),
(FileOps.CREATE, dd({"file_object": 18446738026482168384, "ttid": 1484, "process_id": 859,
"create_options": 33554532,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 4, "file_attributes": 0})),
(FileOps.OPEN_IF, dd({"file_object": 18446738026482168384, "ttid": 1484,
"process_id": 859,
"create_options": 58651617,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 3, "file_attributes": 0})),
(FileOps.OVERWRITE, dd({"file_object": 18446738026482168384, "ttid": 1484,
"process_id": 859,
"create_options": 78874400,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 5, "file_attributes": 0})),
(FileOps.OVERWRITE_IF, dd({"file_object": 18446738026482168384, "ttid": 1484,
"process_id": 859,
"create_options": 83886112,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 6, "file_attributes": 0}))])
def test_create_file_operation(self, expected_op, kfsio, fsio, kevent):
fsio.parse_fsio(CREATE_FILE, kfsio)
kparams = kevent.params
assert kparams.file == kfsio.open_path
assert kparams.tid == kfsio.ttid
assert kparams.pid == kfsio.process_id
assert kparams.operation == expected_op.name
@pytest.mark.parametrize('expected_share_mask, kfsio',
[('r--', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 1, "file_attributes": 0})),
('-w-', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 2, "file_attributes": 0})),
('--d', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 4, "file_attributes": 0})),
('rw-', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 3, "file_attributes": 0})),
('r-d', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 5, "file_attributes": 0})),
('-wd', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 6, "file_attributes": 0})),
('rwd', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 7, "file_attributes": 0})),
('---', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": -1, "file_attributes": 0}))])
def test_create_file_share_mask(self, expected_share_mask, kfsio, fsio, kevent):
fsio.parse_fsio(CREATE_FILE, kfsio)
assert kevent.params.share_mask == expected_share_mask
def test_delete_file(self, fsio, kevent):
kfsio = dd({"file_object": 18446738026474426144, "ttid": 1956, "process_id": 859, "irp_ptr": 18446738026471032392})
fsio.parse_fsio(DELETE_FILE, kfsio)
assert kevent.params.tid == kfsio.ttid
assert kevent.params.file == '\\Device\\HarddiskVolume2\\fibratus.log'
def test_write_file(self, fsio, kevent):
kfsio = dd({"file_object": 18446738026474426144, "process_id": 859, "io_flags": 0, "io_size": 8296,
"offset": 75279, "ttid": 1956})
fsio.parse_fsio(WRITE_FILE, kfsio)
assert kevent.params.tid == kfsio.ttid
assert kevent.params.file == NA
assert kevent.params.io_size == kfsio.io_size / 1024
def test_rename_file(self, fsio, kevent):
kfsio = dd({"file_object": 18446738023471035392, "ttid": 1956, "process_id": 859, "irp_ptr": 18446738026471032392})
fsio.parse_fsio(RENAME_FILE, kfsio)
assert kevent.params.tid == kfsio.ttid
assert kevent.params.file == '\\Device\\HarddiskVolume2\\Windows\\system32\\rpcss.dll'
def test_set_file_information(self, fsio, kevent):
kfsio = dd(
{"file_object": 18446738023471035392, "ttid": 1956, "info_class": 20, "process_id": 859,
"irp_ptr": 18446738026471032392})
fsio.parse_fsio(SET_FILE_INFORMATION, kfsio)
assert kevent.params.tid == kfsio.ttid
assert kevent.params.info_class == 20
assert kevent.params.file == '\\Device\\HarddiskVolume2\\Windows\\system32\\rpcss.dll'
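# Hedged sketch: running only this module's cases programmatically,
# assuming pytest and the fibratus package are importable from the repo root.
if __name__ == '__main__':
    import sys
    sys.exit(pytest.main(['tests/unit/fs.py', '-k', 'create_file', '-q']))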
| 65.597403
| 123
| 0.544447
|
from unittest.mock import Mock
import pytest
from fibratus.common import DotD as dd, NA
from fibratus.fs import FsIO, FileOps
from fibratus.handle import HandleInfo, HandleType
from fibratus.kevent import KEvent
from fibratus.kevent_types import CREATE_FILE, DELETE_FILE, WRITE_FILE, RENAME_FILE, SET_FILE_INFORMATION
from fibratus.thread import ThreadRegistry
@pytest.fixture(scope='module')
def kevent():
return KEvent(Mock(spec_set=ThreadRegistry))
@pytest.fixture(scope='module')
def fsio(kevent):
handles = [HandleInfo(3080, 18446738026482168384, HandleType.DIRECTORY,
"\\Device\\HarddiskVolume2\\Users\\Nedo\\AppData\\Local\\VirtualStore", 640),
HandleInfo(2010, 18446738023471035392, HandleType.FILE,
"\\Device\\HarddiskVolume2\\Windows\\system32\\rpcss.dll", 640)]
fsio = FsIO(kevent, handles)
fsio.file_pool[18446738026474426144] = '\\Device\\HarddiskVolume2\\fibratus.log'
return fsio
class TestFsIO():
def test_init_fsio(self, fsio):
assert len(fsio.file_handles) == 2
@pytest.mark.parametrize('expected_op, kfsio',
[(FileOps.SUPERSEDE, dd({"file_object": 18446738026482168384, "ttid": 1484,
"process_id": 859,
"create_options": 1223456,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 1, "file_attributes": 0})),
(FileOps.OPEN, dd({"file_object": 18446738026482168384, "ttid": 1484, "process_id": 859,
"create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 2, "file_attributes": 0})),
(FileOps.CREATE, dd({"file_object": 18446738026482168384, "ttid": 1484, "process_id": 859,
"create_options": 33554532,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 4, "file_attributes": 0})),
(FileOps.OPEN_IF, dd({"file_object": 18446738026482168384, "ttid": 1484,
"process_id": 859,
"create_options": 58651617,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 3, "file_attributes": 0})),
(FileOps.OVERWRITE, dd({"file_object": 18446738026482168384, "ttid": 1484,
"process_id": 859,
"create_options": 78874400,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 5, "file_attributes": 0})),
(FileOps.OVERWRITE_IF, dd({"file_object": 18446738026482168384, "ttid": 1484,
"process_id": 859,
"create_options": 83886112,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 6, "file_attributes": 0}))])
def test_create_file_operation(self, expected_op, kfsio, fsio, kevent):
fsio.parse_fsio(CREATE_FILE, kfsio)
kparams = kevent.params
assert kparams.file == kfsio.open_path
assert kparams.tid == kfsio.ttid
assert kparams.pid == kfsio.process_id
assert kparams.operation == expected_op.name
@pytest.mark.parametrize('expected_share_mask, kfsio',
[('r--', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 1, "file_attributes": 0})),
('-w-', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 2, "file_attributes": 0})),
('--d', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 4, "file_attributes": 0})),
('rw-', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 3, "file_attributes": 0})),
('r-d', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 5, "file_attributes": 0})),
('-wd', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 6, "file_attributes": 0})),
('rwd', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 7, "file_attributes": 0})),
('---', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": -1, "file_attributes": 0}))])
def test_create_file_share_mask(self, expected_share_mask, kfsio, fsio, kevent):
fsio.parse_fsio(CREATE_FILE, kfsio)
assert kevent.params.share_mask == expected_share_mask
def test_delete_file(self, fsio, kevent):
kfsio = dd({"file_object": 18446738026474426144, "ttid": 1956, "process_id": 859, "irp_ptr": 18446738026471032392})
fsio.parse_fsio(DELETE_FILE, kfsio)
assert kevent.params.tid == kfsio.ttid
assert kevent.params.file == '\\Device\\HarddiskVolume2\\fibratus.log'
def test_write_file(self, fsio, kevent):
kfsio = dd({"file_object": 18446738026474426144, "process_id": 859, "io_flags": 0, "io_size": 8296,
"offset": 75279, "ttid": 1956})
fsio.parse_fsio(WRITE_FILE, kfsio)
assert kevent.params.tid == kfsio.ttid
assert kevent.params.file == NA
assert kevent.params.io_size == kfsio.io_size / 1024
def test_rename_file(self, fsio, kevent):
kfsio = dd({"file_object": 18446738023471035392, "ttid": 1956, "process_id": 859, "irp_ptr": 18446738026471032392})
fsio.parse_fsio(RENAME_FILE, kfsio)
assert kevent.params.tid == kfsio.ttid
assert kevent.params.file == '\\Device\\HarddiskVolume2\\Windows\\system32\\rpcss.dll'
def test_set_file_information(self, fsio, kevent):
kfsio = dd(
{"file_object": 18446738023471035392, "ttid": 1956, "info_class": 20, "process_id": 859,
"irp_ptr": 18446738026471032392})
fsio.parse_fsio(SET_FILE_INFORMATION, kfsio)
assert kevent.params.tid == kfsio.ttid
assert kevent.params.info_class == 20
assert kevent.params.file == '\\Device\\HarddiskVolume2\\Windows\\system32\\rpcss.dll'
| true
| true
|
1c461fa375b527ed770883ccd44488bbb7967dad
| 1,644
|
py
|
Python
|
temp_scripts/update_parameters.py
|
openmaker-eu/watchtower
|
af4d3e92b4cf0bf93c10e288a8b8ea97079da86d
|
[
"MIT"
] | 2
|
2017-05-16T10:57:29.000Z
|
2017-12-14T11:33:18.000Z
|
temp_scripts/update_parameters.py
|
openmaker-eu/watchtower
|
af4d3e92b4cf0bf93c10e288a8b8ea97079da86d
|
[
"MIT"
] | 9
|
2018-11-29T07:44:15.000Z
|
2021-12-13T19:54:18.000Z
|
temp_scripts/update_parameters.py
|
openmaker-eu/watchtower
|
af4d3e92b4cf0bf93c10e288a8b8ea97079da86d
|
[
"MIT"
] | 1
|
2019-02-28T19:00:47.000Z
|
2019-02-28T19:00:47.000Z
|
from application.Connections import Connection
from pdb import set_trace
def updateAudienceParameters(topicID, location, signal_strength):
with Connection.Instance().get_cursor() as cur:
sql = (
"UPDATE audience_parameters "
"SET signal_strength = %s "
"WHERE topic_id = %s and location = %s "
)
cur.execute(sql, [int(signal_strength), int(topicID), location])
def updateInfluencerParameters(topicID, location, signal_strength, following_limit):
with Connection.Instance().get_cursor() as cur:
sql = (
"UPDATE influencer_parameters "
"SET signal_strength = %s, following_limit = %s "
"WHERE topic_id = %s and location = %s "
)
cur.execute(sql, [int(signal_strength), int(following_limit), int(topicID), location])
print("Influencer or Audience ?\n1) Influencer\n2) Audience")
choice = int(input())
if choice == 1:
# Influencer
s = ""
print("Enter 'topicID, location, signal_strength, following_limit' and press enter.\nType 'DONE' to finish.")
s = input()
while(s != "DONE"):
l = s.strip().split()
if(len(l) == 4):
updateInfluencerParameters(*l)
print("UPDATED!")
s = input()
if choice == 2:
# Audience
s = ""
print("Enter 'topicID, location, signal_strength' and press enter.\nType 'DONE' to finish.")
s = input()
while(s != "DONE"):
l = s.strip().split()
if(len(l) == 3):
updateAudienceParameters(*l)
print("UPDATED!")
s = input()
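# Hedged sketch: the helpers above can also be called directly with concrete values
# (the topic id, location and thresholds below are made up), bypassing the prompts.
def apply_example_parameters(topic_id=42, location='milan'):
    updateInfluencerParameters(topic_id, location, 3, 5000)
    updateAudienceParameters(topic_id, location, 3)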
| 34.25
| 113
| 0.58455
|
from application.Connections import Connection
from pdb import set_trace
def updateAudienceParameters(topicID, location, signal_strength):
with Connection.Instance().get_cursor() as cur:
sql = (
"UPDATE audience_parameters "
"SET signal_strength = %s "
"WHERE topic_id = %s and location = %s "
)
cur.execute(sql, [int(signal_strength), int(topicID), location])
def updateInfluencerParameters(topicID, location, signal_strength, following_limit):
with Connection.Instance().get_cursor() as cur:
sql = (
"UPDATE influencer_parameters "
"SET signal_strength = %s, following_limit = %s "
"WHERE topic_id = %s and location = %s "
)
cur.execute(sql, [int(signal_strength), int(following_limit), int(topicID), location])
print("Influencer or Audience ?\n1) Influencer\n2) Audience")
choice = int(input())
if choice == 1:
s = ""
print("Enter 'topicID, location, signal_strength, following_limit' and press enter.\nType 'DONE' to finish.")
s = input()
while(s != "DONE"):
l = s.strip().split()
if(len(l) == 4):
updateInfluencerParameters(*l)
print("UPDATED!")
s = input()
if choice == 2:
s = ""
print("Enter 'topicID, location, signal_strength' and press enter.\nType 'DONE' to finish.")
s = input()
while(s != "DONE"):
l = s.strip().split()
if(len(l) == 3):
updateAudienceParameters(*l)
print("UPDATED!")
s = input()
| true
| true
|
1c462039acecb8d459a5e841e0c153542b907b5f
| 3,583
|
py
|
Python
|
sympy/concrete/products.py
|
gnulinooks/sympy
|
46f63841f96cd025289b91ba9db3e261138d720a
|
[
"BSD-3-Clause"
] | 1
|
2016-05-09T10:08:18.000Z
|
2016-05-09T10:08:18.000Z
|
sympy/concrete/products.py
|
gnulinooks/sympy
|
46f63841f96cd025289b91ba9db3e261138d720a
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/concrete/products.py
|
gnulinooks/sympy
|
46f63841f96cd025289b91ba9db3e261138d720a
|
[
"BSD-3-Clause"
] | null | null | null |
from sympy.core import Basic, S, C, Add, Mul, Symbol, sympify
from sympy.polys import quo, roots
from sympy.simplify import powsimp
class Product(Basic):
"""Represents unevaluated product.
"""
def __new__(cls, term, *symbols, **assumptions):
term = sympify(term)
if term.is_Number:
if term is S.NaN:
return S.NaN
elif term is S.Infinity:
return S.NaN
elif term is S.NegativeInfinity:
return S.NaN
elif term is S.Zero:
return S.Zero
elif term is S.One:
return S.One
if len(symbols) == 1:
symbol = symbols[0]
if isinstance(symbol, C.Equality):
k = symbol.lhs
a = symbol.rhs.start
n = symbol.rhs.end
elif isinstance(symbol, (tuple, list)):
k, a, n = symbol
else:
raise ValueError("Invalid arguments")
k, a, n = map(sympify, (k, a, n))
if isinstance(a, C.Number) and isinstance(n, C.Number):
return Mul(*[term.subs(k, i) for i in xrange(int(a), int(n)+1)])
else:
raise NotImplementedError
obj = Basic.__new__(cls, **assumptions)
obj._args = (term, k, a, n)
return obj
@property
def term(self):
return self._args[0]
@property
def index(self):
return self._args[1]
@property
def lower(self):
return self._args[2]
@property
def upper(self):
return self._args[3]
def doit(self):
prod = self._eval_product()
if prod is not None:
return powsimp(prod)
else:
return self
def _eval_product(self, term=None):
k = self.index
a = self.lower
n = self.upper
if term is None:
term = self.term
if not term.has(k):
return term**(n-a+1)
elif term.is_polynomial(k):
poly = term.as_poly(k)
A = B = Q = S.One
C_= poly.LC
all_roots = roots(poly, multiple=True)
for r in all_roots:
A *= C.RisingFactorial(a-r, n-a+1)
Q *= n - r
if len(all_roots) < poly.degree:
B = Product(quo(poly, Q.as_poly(k)), (k, a, n))
return poly.LC**(n-a+1) * A * B
elif term.is_Add:
p, q = term.as_numer_denom()
p = self._eval_product(p)
q = self._eval_product(q)
return p / q
elif term.is_Mul:
exclude, include = [], []
for t in term.args:
p = self._eval_product(t)
if p is not None:
exclude.append(p)
else:
                    include.append(t)  # keep the unevaluated factor; p is None in this branch
if not exclude:
return None
else:
A, B = Mul(*exclude), Mul(*include)
return A * Product(B, (k, a, n))
elif term.is_Pow:
if not term.base.has(k):
s = sum(term.exp, (k, a, n))
if not isinstance(s, Sum):
return term.base**s
elif not term.exp.has(k):
p = self._eval_product(term.base)
if p is not None:
return p**term.exp
def product(*args, **kwargs):
prod = Product(*args, **kwargs)
if isinstance(prod, Product):
return prod.doit()
else:
return prod
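# Quick sanity checks; note that Product only accepts numeric bounds (see __new__ above).
if __name__ == '__main__':
    k = Symbol('k')
    assert product(k, (k, 1, 5)) == 120    # 5!
    assert product(2*k, (k, 1, 3)) == 48   # 2 * 4 * 6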
| 25.055944
| 80
| 0.476137
|
from sympy.core import Basic, S, C, Add, Mul, Symbol, sympify
from sympy.polys import quo, roots
from sympy.simplify import powsimp
class Product(Basic):
def __new__(cls, term, *symbols, **assumptions):
term = sympify(term)
if term.is_Number:
if term is S.NaN:
return S.NaN
elif term is S.Infinity:
return S.NaN
elif term is S.NegativeInfinity:
return S.NaN
elif term is S.Zero:
return S.Zero
elif term is S.One:
return S.One
if len(symbols) == 1:
symbol = symbols[0]
if isinstance(symbol, C.Equality):
k = symbol.lhs
a = symbol.rhs.start
n = symbol.rhs.end
elif isinstance(symbol, (tuple, list)):
k, a, n = symbol
else:
raise ValueError("Invalid arguments")
k, a, n = map(sympify, (k, a, n))
if isinstance(a, C.Number) and isinstance(n, C.Number):
return Mul(*[term.subs(k, i) for i in xrange(int(a), int(n)+1)])
else:
raise NotImplementedError
obj = Basic.__new__(cls, **assumptions)
obj._args = (term, k, a, n)
return obj
@property
def term(self):
return self._args[0]
@property
def index(self):
return self._args[1]
@property
def lower(self):
return self._args[2]
@property
def upper(self):
return self._args[3]
def doit(self):
prod = self._eval_product()
if prod is not None:
return powsimp(prod)
else:
return self
def _eval_product(self, term=None):
k = self.index
a = self.lower
n = self.upper
if term is None:
term = self.term
if not term.has(k):
return term**(n-a+1)
elif term.is_polynomial(k):
poly = term.as_poly(k)
A = B = Q = S.One
C_= poly.LC
all_roots = roots(poly, multiple=True)
for r in all_roots:
A *= C.RisingFactorial(a-r, n-a+1)
Q *= n - r
if len(all_roots) < poly.degree:
B = Product(quo(poly, Q.as_poly(k)), (k, a, n))
return poly.LC**(n-a+1) * A * B
elif term.is_Add:
p, q = term.as_numer_denom()
p = self._eval_product(p)
q = self._eval_product(q)
return p / q
elif term.is_Mul:
exclude, include = [], []
for t in term.args:
p = self._eval_product(t)
if p is not None:
exclude.append(p)
else:
                    include.append(t)
if not exclude:
return None
else:
A, B = Mul(*exclude), Mul(*include)
return A * Product(B, (k, a, n))
elif term.is_Pow:
if not term.base.has(k):
s = sum(term.exp, (k, a, n))
if not isinstance(s, Sum):
return term.base**s
elif not term.exp.has(k):
p = self._eval_product(term.base)
if p is not None:
return p**term.exp
def product(*args, **kwargs):
prod = Product(*args, **kwargs)
if isinstance(prod, Product):
return prod.doit()
else:
return prod
| true
| true
|
1c4620bd5f4a647daadaabbb35603c6d6b7b073f
| 7,172
|
py
|
Python
|
fiber/middleware.py
|
bsimons/django-fiber
|
0f4b03217a4aeba6b48908825507fbe8c5732c8d
|
[
"Apache-2.0"
] | null | null | null |
fiber/middleware.py
|
bsimons/django-fiber
|
0f4b03217a4aeba6b48908825507fbe8c5732c8d
|
[
"Apache-2.0"
] | null | null | null |
fiber/middleware.py
|
bsimons/django-fiber
|
0f4b03217a4aeba6b48908825507fbe8c5732c8d
|
[
"Apache-2.0"
] | null | null | null |
import random
import re
import json
from urllib import unquote
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.template import loader, RequestContext
from django.utils.encoding import smart_text
from django.utils.html import escape
from fiber.app_settings import LOGIN_STRING, EXCLUDE_URLS, EDITOR, PERMISSION_CLASS
from fiber.models import ContentItem, Page
from fiber.utils.import_util import import_element, load_class
perms = load_class(PERMISSION_CLASS)
def is_html(response):
"""
Returns True if the response is either `text/html` or `application/xhtml+xml`
"""
content_type = response.get('Content-Type', None)
return bool(content_type and content_type.split(';')[0] in ('text/html', 'application/xhtml+xml'))
class AdminPageMiddleware(object):
LOGIN_SESSION_KEY = 'show_fiber_login'
body_re = re.compile(
r'<head>(?P<HEAD>.*)</head>(?P<AFTER_HEAD>.*)<body(?P<BODY_ATTRS>.*?)>(?P<BODY>.*)</body>',
re.DOTALL)
def __init__(self):
self.editor_settings = import_element(EDITOR)
def process_response(self, request, response):
# only process non-streaming html and xhtml responses
if is_html(response) and hasattr(response, 'content'):
if self.should_setup_login_session(request):
return self.setup_login_session(request)
if self.show_login(request) or self.show_admin(request, response):
return self.modify_response(request, response)
return response
def should_setup_login_session(self, request):
"""
Only set self.LOGIN_SESSION_KEY in the session when the request
- has LOGIN_STRING (defaults to @fiber) behind its request-url
"""
qs = unquote(request.META.get('QUERY_STRING', ''))
return request.path_info.endswith(LOGIN_STRING) or qs.endswith(LOGIN_STRING)
def setup_login_session(self, request):
"""
Add self.LOGIN_SESSION_KEY to the session and redirect to the the requested path without LOGIN_STRING
"""
request.session[self.LOGIN_SESSION_KEY] = True
url = request.path_info.replace(LOGIN_STRING, '')
qs = unquote(request.META.get('QUERY_STRING', ''))
if qs:
qs = '?%s' % qs.replace(LOGIN_STRING, '').rstrip('&')
return HttpResponseRedirect(''.join([url, qs]))
def show_login(self, request):
"""
Only show the Fiber login interface when the request
- is NOT performed by an admin user
- has session key self.LOGIN_SESSION_KEY = True
"""
return not request.user.is_staff and request.session.get(self.LOGIN_SESSION_KEY)
def show_admin(self, request, response):
"""
Only show the Fiber admin interface when the request
- is not an AJAX request
- has a response status code of 200
- is performed by an admin user
- has a user with sufficient permissions based on the Permission Class
- does not match EXCLUDE_URLS (empty by default)
"""
if request.is_ajax() or response.status_code != 200:
return False
if request.user.is_staff and perms.is_fiber_editor(request.user):
if EXCLUDE_URLS:
url = request.path_info.lstrip('/')
for exclude_url in EXCLUDE_URLS:
if re.search(exclude_url, url):
return False
return True
return False
def modify_response(self, request, response):
"""
Modify the response to include Fiber assets and data.
"""
fiber_data = {}
replacement = r'<head>\g<HEAD>%(header_html)s</head>\g<AFTER_HEAD><body data-fiber-data="%(fiber_data)s"\g<BODY_ATTRS>>\g<BODY></body>'
content = smart_text(response.content)
if self.show_login(request):
# Only show the login window once
request.session[self.LOGIN_SESSION_KEY] = False
fiber_data['show_login'] = True
elif self.show_admin(request, response):
if self.is_django_admin(request):
fiber_data['backend'] = True
else:
fiber_data['frontend'] = True
page = Page.objects.get_by_url(request.path_info)
if page:
fiber_data['page_id'] = page.pk
# Inject admin html in body, wrap the original body content in a div.
replacement = r'<head>\g<HEAD>%(header_html)s</head>\g<AFTER_HEAD><body data-fiber-data="%(fiber_data)s"\g<BODY_ATTRS>><div id="wpr-body">\g<BODY></body>'
content = content.replace('</body>', '</div>%s</body>' % self.get_body_html(request))
# Inject header html in head.
# Add fiber-data attribute to body tag.
replacement = replacement % {
'header_html': self.get_header_html(request),
'fiber_data': escape(json.dumps(fiber_data, sort_keys=True))
}
response.content = self.body_re.sub(replacement, content)
return response
def is_django_admin(self, request):
return request.path_info.startswith(reverse('admin:index'))
def get_header_html(self, request):
context = {
'editor_template_js': self.editor_settings.get('template_js'),
'editor_template_css': self.editor_settings.get('template_css'),
'BACKEND_BASE_URL': reverse('admin:index'),
'FIBER_LOGIN_URL': reverse('fiber_login'),
}
return loader.render_to_string('fiber/header.html', context, RequestContext(request))
def get_body_html(self, request):
context = {
'logout_url': self.get_logout_url(request)
}
return loader.render_to_string('fiber/admin.html', context, RequestContext(request))
def get_logout_url(self, request):
if request.META['QUERY_STRING']:
return '%s?next=%s?%s' % (reverse('admin:logout'), request.path_info, request.META['QUERY_STRING'])
else:
return '%s?next=%s' % (reverse('admin:logout'), request.path_info)
class ObfuscateEmailAddressMiddleware(object):
"""
Replaces plain email addresses with escaped addresses in (non streaming) HTML responses
"""
def process_response(self, request, response):
if is_html(response) and hasattr(response, 'content'): # Do not obfuscate non-html and streaming responses.
# http://www.lampdocs.com/blog/2008/10/regular-expression-to-extract-all-e-mail-addresses-from-a-file-with-php/
email_pattern = re.compile(r'(mailto:)?[_a-zA-Z0-9-]+(\.[_a-zA-Z0-9-]+)*(\+[_a-zA-Z0-9-]+)?@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.(([0-9]{1,3})|([a-zA-Z]{2,3})|(aero|coop|info|museum|name))')
response.content = email_pattern.sub(self.encode_email, response.content)
return response
def encode_email(self, matches):
encoded_char_list = []
for char in matches.group(0):
encoded_char_list.append(random.choice(['&#%d;' % ord(char), '&#x%x;' % ord(char)]))
return ''.join(encoded_char_list)
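# Hedged sketch of wiring these classes into an old-style (pre Django 1.10) settings
# module; only the fiber.middleware paths follow from this file, the rest is assumed.
#
#     MIDDLEWARE_CLASSES = (
#         'django.contrib.sessions.middleware.SessionMiddleware',
#         'django.contrib.auth.middleware.AuthenticationMiddleware',
#         'fiber.middleware.ObfuscateEmailAddressMiddleware',
#         'fiber.middleware.AdminPageMiddleware',
#     )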
| 43.204819
| 198
| 0.641104
|
import random
import re
import json
from urllib import unquote
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.template import loader, RequestContext
from django.utils.encoding import smart_text
from django.utils.html import escape
from fiber.app_settings import LOGIN_STRING, EXCLUDE_URLS, EDITOR, PERMISSION_CLASS
from fiber.models import ContentItem, Page
from fiber.utils.import_util import import_element, load_class
perms = load_class(PERMISSION_CLASS)
def is_html(response):
content_type = response.get('Content-Type', None)
return bool(content_type and content_type.split(';')[0] in ('text/html', 'application/xhtml+xml'))
class AdminPageMiddleware(object):
LOGIN_SESSION_KEY = 'show_fiber_login'
body_re = re.compile(
r'<head>(?P<HEAD>.*)</head>(?P<AFTER_HEAD>.*)<body(?P<BODY_ATTRS>.*?)>(?P<BODY>.*)</body>',
re.DOTALL)
def __init__(self):
self.editor_settings = import_element(EDITOR)
def process_response(self, request, response):
if is_html(response) and hasattr(response, 'content'):
if self.should_setup_login_session(request):
return self.setup_login_session(request)
if self.show_login(request) or self.show_admin(request, response):
return self.modify_response(request, response)
return response
def should_setup_login_session(self, request):
qs = unquote(request.META.get('QUERY_STRING', ''))
return request.path_info.endswith(LOGIN_STRING) or qs.endswith(LOGIN_STRING)
def setup_login_session(self, request):
request.session[self.LOGIN_SESSION_KEY] = True
url = request.path_info.replace(LOGIN_STRING, '')
qs = unquote(request.META.get('QUERY_STRING', ''))
if qs:
qs = '?%s' % qs.replace(LOGIN_STRING, '').rstrip('&')
return HttpResponseRedirect(''.join([url, qs]))
def show_login(self, request):
return not request.user.is_staff and request.session.get(self.LOGIN_SESSION_KEY)
def show_admin(self, request, response):
if request.is_ajax() or response.status_code != 200:
return False
if request.user.is_staff and perms.is_fiber_editor(request.user):
if EXCLUDE_URLS:
url = request.path_info.lstrip('/')
for exclude_url in EXCLUDE_URLS:
if re.search(exclude_url, url):
return False
return True
return False
def modify_response(self, request, response):
fiber_data = {}
replacement = r'<head>\g<HEAD>%(header_html)s</head>\g<AFTER_HEAD><body data-fiber-data="%(fiber_data)s"\g<BODY_ATTRS>>\g<BODY></body>'
content = smart_text(response.content)
if self.show_login(request):
request.session[self.LOGIN_SESSION_KEY] = False
fiber_data['show_login'] = True
elif self.show_admin(request, response):
if self.is_django_admin(request):
fiber_data['backend'] = True
else:
fiber_data['frontend'] = True
page = Page.objects.get_by_url(request.path_info)
if page:
fiber_data['page_id'] = page.pk
replacement = r'<head>\g<HEAD>%(header_html)s</head>\g<AFTER_HEAD><body data-fiber-data="%(fiber_data)s"\g<BODY_ATTRS>><div id="wpr-body">\g<BODY></body>'
content = content.replace('</body>', '</div>%s</body>' % self.get_body_html(request))
replacement = replacement % {
'header_html': self.get_header_html(request),
'fiber_data': escape(json.dumps(fiber_data, sort_keys=True))
}
response.content = self.body_re.sub(replacement, content)
return response
def is_django_admin(self, request):
return request.path_info.startswith(reverse('admin:index'))
def get_header_html(self, request):
context = {
'editor_template_js': self.editor_settings.get('template_js'),
'editor_template_css': self.editor_settings.get('template_css'),
'BACKEND_BASE_URL': reverse('admin:index'),
'FIBER_LOGIN_URL': reverse('fiber_login'),
}
return loader.render_to_string('fiber/header.html', context, RequestContext(request))
def get_body_html(self, request):
context = {
'logout_url': self.get_logout_url(request)
}
return loader.render_to_string('fiber/admin.html', context, RequestContext(request))
def get_logout_url(self, request):
if request.META['QUERY_STRING']:
return '%s?next=%s?%s' % (reverse('admin:logout'), request.path_info, request.META['QUERY_STRING'])
else:
return '%s?next=%s' % (reverse('admin:logout'), request.path_info)
class ObfuscateEmailAddressMiddleware(object):
def process_response(self, request, response):
if is_html(response) and hasattr(response, 'content'):
email_pattern = re.compile(r'(mailto:)?[_a-zA-Z0-9-]+(\.[_a-zA-Z0-9-]+)*(\+[_a-zA-Z0-9-]+)?@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.(([0-9]{1,3})|([a-zA-Z]{2,3})|(aero|coop|info|museum|name))')
response.content = email_pattern.sub(self.encode_email, response.content)
return response
def encode_email(self, matches):
encoded_char_list = []
for char in matches.group(0):
encoded_char_list.append(random.choice(['&#%d;' % ord(char), '&#x%x;' % ord(char)]))
return ''.join(encoded_char_list)
| true
| true
|
1c46219a94ef2b0745f859e73be317175fb547fb
| 391
|
py
|
Python
|
rsvp/urls.py
|
DXDSpirits/appsbackend
|
2c69487c4e4d6dc78091ba8030889a5ddc990836
|
[
"MIT"
] | null | null | null |
rsvp/urls.py
|
DXDSpirits/appsbackend
|
2c69487c4e4d6dc78091ba8030889a5ddc990836
|
[
"MIT"
] | null | null | null |
rsvp/urls.py
|
DXDSpirits/appsbackend
|
2c69487c4e4d6dc78091ba8030889a5ddc990836
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url, patterns, include
from rest_framework.routers import DefaultRouter
from rsvp import views
router = DefaultRouter()
router.register(r'rsvp', views.RsvpViewSet)
router.register(r'guest', views.GuestViewSet)
urlpatterns = patterns('',
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
)
| 30.076923
| 83
| 0.754476
|
from django.conf.urls import url, patterns, include
from rest_framework.routers import DefaultRouter
from rsvp import views
router = DefaultRouter()
router.register(r'rsvp', views.RsvpViewSet)
router.register(r'guest', views.GuestViewSet)
urlpatterns = patterns('',
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
)
| true
| true
|
1c4624dd307575e0198507f2f32738456ad7f101
| 1,086
|
py
|
Python
|
util.py
|
mhaberler/jumpvis
|
93b3b723d27aab7f3d4319cc91d06432022ddc6d
|
[
"MIT"
] | null | null | null |
util.py
|
mhaberler/jumpvis
|
93b3b723d27aab7f3d4319cc91d06432022ddc6d
|
[
"MIT"
] | null | null | null |
util.py
|
mhaberler/jumpvis
|
93b3b723d27aab7f3d4319cc91d06432022ddc6d
|
[
"MIT"
] | null | null | null |
def get_bounds(points):
"""
return bounding box of a list of gpxpy points
"""
min_lat = None
max_lat = None
min_lon = None
max_lon = None
min_ele = None
max_ele = None
for point in points:
if min_lat is None or point.latitude < min_lat:
min_lat = point.latitude
if max_lat is None or point.latitude > max_lat:
max_lat = point.latitude
if min_lon is None or point.longitude < min_lon:
min_lon = point.longitude
if max_lon is None or point.longitude > max_lon:
max_lon = point.longitude
if min_ele is None or point.elevation < min_ele:
min_ele = point.elevation
if max_ele is None or point.elevation > max_ele:
max_ele = point.elevation
    # compare against None explicitly so 0.0 coordinates are not treated as missing
    if min_lat is not None and max_lat is not None and min_lon is not None and max_lon is not None:
return {'min_latitude': min_lat, 'max_latitude': max_lat,
'min_longitude': min_lon, 'max_longitude': max_lon,
'min_elevation': min_ele, 'max_elevation': max_ele,
}
return None
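# Example usage with a minimal stand-in for gpxpy track points:
if __name__ == '__main__':
    from collections import namedtuple
    Point = namedtuple('Point', 'latitude longitude elevation')
    pts = [Point(47.0, 15.4, 350.0), Point(47.1, 15.5, 420.0)]
    print(get_bounds(pts))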
| 32.909091
| 67
| 0.608656
|
def get_bounds(points):
min_lat = None
max_lat = None
min_lon = None
max_lon = None
min_ele = None
max_ele = None
for point in points:
if min_lat is None or point.latitude < min_lat:
min_lat = point.latitude
if max_lat is None or point.latitude > max_lat:
max_lat = point.latitude
if min_lon is None or point.longitude < min_lon:
min_lon = point.longitude
if max_lon is None or point.longitude > max_lon:
max_lon = point.longitude
if min_ele is None or point.elevation < min_ele:
min_ele = point.elevation
if max_ele is None or point.elevation > max_ele:
max_ele = point.elevation
    if min_lat is not None and max_lat is not None and min_lon is not None and max_lon is not None:
return {'min_latitude': min_lat, 'max_latitude': max_lat,
'min_longitude': min_lon, 'max_longitude': max_lon,
'min_elevation': min_ele, 'max_elevation': max_ele,
}
return None
| true
| true
|
1c4624f33204aa75a7cdc3d84fb7b0e45eb71211
| 8,645
|
py
|
Python
|
vendor-local/lib/python/taggit/managers.py
|
lmorchard/badg.us
|
aa75b9cb6858e99de16aa840add0eef9065fdb4c
|
[
"BSD-3-Clause"
] | 4
|
2015-09-01T01:19:45.000Z
|
2018-05-16T16:03:10.000Z
|
vendor-local/lib/python/taggit/managers.py
|
lmorchard/badg.us
|
aa75b9cb6858e99de16aa840add0eef9065fdb4c
|
[
"BSD-3-Clause"
] | 7
|
2022-01-11T19:42:12.000Z
|
2022-01-11T19:42:55.000Z
|
vendor-local/lib/python/taggit/managers.py
|
lmorchard/badg.us
|
aa75b9cb6858e99de16aa840add0eef9065fdb4c
|
[
"BSD-3-Clause"
] | 3
|
2015-05-21T15:36:01.000Z
|
2020-11-20T23:58:12.000Z
|
from django.contrib.contenttypes.generic import GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.fields.related import ManyToManyRel, RelatedField, add_lazy_relation
from django.db.models.related import RelatedObject
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from taggit.forms import TagField
from taggit.models import TaggedItem, GenericTaggedItemBase
from taggit.utils import require_instance_manager
try:
all
except NameError:
# 2.4 compat
try:
from django.utils.itercompat import all
except ImportError:
# 1.1.X compat
def all(iterable):
for item in iterable:
if not item:
return False
return True
class TaggableRel(ManyToManyRel):
def __init__(self):
self.related_name = None
self.limit_choices_to = {}
self.symmetrical = True
self.multiple = True
self.through = None
class TaggableManager(RelatedField):
def __init__(self, verbose_name=_("Tags"),
help_text=_("A comma-separated list of tags."), through=None, blank=False):
self.through = through or TaggedItem
self.rel = TaggableRel()
self.verbose_name = verbose_name
self.help_text = help_text
self.blank = blank
self.editable = True
self.unique = False
self.creates_table = False
self.db_column = None
self.choices = None
self.serialize = False
self.null = True
self.creation_counter = models.Field.creation_counter
models.Field.creation_counter += 1
def __get__(self, instance, model):
if instance is not None and instance.pk is None:
raise ValueError("%s objects need to have a primary key value "
"before you can access their tags." % model.__name__)
manager = _TaggableManager(
through=self.through, model=model, instance=instance
)
return manager
def contribute_to_class(self, cls, name):
self.name = self.column = name
self.model = cls
cls._meta.add_field(self)
setattr(cls, name, self)
if not cls._meta.abstract:
if isinstance(self.through, basestring):
def resolve_related_class(field, model, cls):
self.through = model
self.post_through_setup(cls)
add_lazy_relation(
cls, self, self.through, resolve_related_class
)
else:
self.post_through_setup(cls)
def post_through_setup(self, cls):
self.use_gfk = (
self.through is None or issubclass(self.through, GenericTaggedItemBase)
)
self.rel.to = self.through._meta.get_field("tag").rel.to
if self.use_gfk:
tagged_items = GenericRelation(self.through)
tagged_items.contribute_to_class(cls, "tagged_items")
def save_form_data(self, instance, value):
getattr(instance, self.name).set(*value)
def formfield(self, form_class=TagField, **kwargs):
defaults = {
"label": capfirst(self.verbose_name),
"help_text": self.help_text,
"required": not self.blank
}
defaults.update(kwargs)
return form_class(**defaults)
def value_from_object(self, instance):
if instance.pk:
return self.through.objects.filter(**self.through.lookup_kwargs(instance))
return self.through.objects.none()
def related_query_name(self):
return self.model._meta.module_name
def m2m_reverse_name(self):
return self.through._meta.get_field_by_name("tag")[0].column
def m2m_target_field_name(self):
return self.model._meta.pk.name
def m2m_reverse_target_field_name(self):
return self.rel.to._meta.pk.name
def m2m_column_name(self):
if self.use_gfk:
return self.through._meta.virtual_fields[0].fk_field
return self.through._meta.get_field('content_object').column
def db_type(self, connection=None):
return None
def m2m_db_table(self):
return self.through._meta.db_table
def extra_filters(self, pieces, pos, negate):
if negate or not self.use_gfk:
return []
prefix = "__".join(["tagged_items"] + pieces[:pos-2])
cts = map(ContentType.objects.get_for_model, _get_subclasses(self.model))
if len(cts) == 1:
return [("%s__content_type" % prefix, cts[0])]
return [("%s__content_type__in" % prefix, cts)]
def bulk_related_objects(self, new_objs, using):
return []
class _TaggableManager(models.Manager):
def __init__(self, through, model, instance):
self.through = through
self.model = model
self.instance = instance
def get_query_set(self):
return self.through.tags_for(self.model, self.instance)
def _lookup_kwargs(self):
return self.through.lookup_kwargs(self.instance)
@require_instance_manager
def add(self, *tags):
str_tags = set([
t
for t in tags
if not isinstance(t, self.through.tag_model())
])
tag_objs = set(tags) - str_tags
# Checking for existing tags irrespective of the case
if str_tags:
q = models.Q()
for str_tag in str_tags:
q |= models.Q(name__iexact=str_tag)
existing = self.through.tag_model().objects.filter(q)
tag_objs.update(existing)
existing_low = [t.name.lower() for t in existing]
new_tags = [t for t in str_tags if t.lower() not in existing_low]
for new_tag in new_tags:
tag_objs.add(self.through.tag_model().objects.create(name=new_tag))
for tag in tag_objs:
self.through.objects.get_or_create(tag=tag, **self._lookup_kwargs())
@require_instance_manager
def set(self, *tags):
self.clear()
self.add(*tags)
@require_instance_manager
def remove(self, *tags):
q = models.Q()
for tag in tags:
q |= models.Q(tag__name__iexact=tag)
self.through.objects.filter(**self._lookup_kwargs()).filter(
q).delete()
@require_instance_manager
def clear(self):
self.through.objects.filter(**self._lookup_kwargs()).delete()
def most_common(self):
return self.get_query_set().annotate(
num_times=models.Count(self.through.tag_relname())
).order_by('-num_times')
@require_instance_manager
def similar_objects(self):
lookup_kwargs = self._lookup_kwargs()
lookup_keys = sorted(lookup_kwargs)
qs = self.through.objects.values(*lookup_kwargs.keys())
qs = qs.annotate(n=models.Count('pk'))
qs = qs.exclude(**lookup_kwargs)
qs = qs.filter(tag__in=self.all())
qs = qs.order_by('-n')
# TODO: This all feels like a bit of a hack.
items = {}
if len(lookup_keys) == 1:
# Can we do this without a second query by using a select_related()
# somehow?
f = self.through._meta.get_field_by_name(lookup_keys[0])[0]
objs = f.rel.to._default_manager.filter(**{
"%s__in" % f.rel.field_name: [r["content_object"] for r in qs]
})
for obj in objs:
items[(getattr(obj, f.rel.field_name),)] = obj
else:
preload = {}
for result in qs:
preload.setdefault(result['content_type'], set())
preload[result["content_type"]].add(result["object_id"])
for ct, obj_ids in preload.iteritems():
ct = ContentType.objects.get_for_id(ct)
for obj in ct.model_class()._default_manager.filter(pk__in=obj_ids):
items[(ct.pk, obj.pk)] = obj
results = []
for result in qs:
obj = items[
tuple(result[k] for k in lookup_keys)
]
obj.similar_tags = result["n"]
results.append(obj)
return results
def _get_subclasses(model):
subclasses = [model]
for f in model._meta.get_all_field_names():
field = model._meta.get_field_by_name(f)[0]
if (isinstance(field, RelatedObject) and
getattr(field.field.rel, "parent_link", None)):
subclasses.extend(_get_subclasses(field.model))
return subclasses
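# Hedged usage sketch; the Snack model below is invented for illustration and is not
# part of this module:
#
#     from django.db import models
#     from taggit.managers import TaggableManager
#
#     class Snack(models.Model):
#         name = models.CharField(max_length=100)
#         tags = TaggableManager()
#
#     snack = Snack.objects.create(name="apple")
#     snack.tags.add("red", "fruit")
#     snack.tags.most_common()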
| 34.035433
| 90
| 0.616888
|
from django.contrib.contenttypes.generic import GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.fields.related import ManyToManyRel, RelatedField, add_lazy_relation
from django.db.models.related import RelatedObject
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from taggit.forms import TagField
from taggit.models import TaggedItem, GenericTaggedItemBase
from taggit.utils import require_instance_manager
try:
all
except NameError:
try:
from django.utils.itercompat import all
except ImportError:
def all(iterable):
for item in iterable:
if not item:
return False
return True
class TaggableRel(ManyToManyRel):
def __init__(self):
self.related_name = None
self.limit_choices_to = {}
self.symmetrical = True
self.multiple = True
self.through = None
class TaggableManager(RelatedField):
def __init__(self, verbose_name=_("Tags"),
help_text=_("A comma-separated list of tags."), through=None, blank=False):
self.through = through or TaggedItem
self.rel = TaggableRel()
self.verbose_name = verbose_name
self.help_text = help_text
self.blank = blank
self.editable = True
self.unique = False
self.creates_table = False
self.db_column = None
self.choices = None
self.serialize = False
self.null = True
self.creation_counter = models.Field.creation_counter
models.Field.creation_counter += 1
def __get__(self, instance, model):
if instance is not None and instance.pk is None:
raise ValueError("%s objects need to have a primary key value "
"before you can access their tags." % model.__name__)
manager = _TaggableManager(
through=self.through, model=model, instance=instance
)
return manager
def contribute_to_class(self, cls, name):
self.name = self.column = name
self.model = cls
cls._meta.add_field(self)
setattr(cls, name, self)
if not cls._meta.abstract:
if isinstance(self.through, basestring):
def resolve_related_class(field, model, cls):
self.through = model
self.post_through_setup(cls)
add_lazy_relation(
cls, self, self.through, resolve_related_class
)
else:
self.post_through_setup(cls)
def post_through_setup(self, cls):
self.use_gfk = (
self.through is None or issubclass(self.through, GenericTaggedItemBase)
)
self.rel.to = self.through._meta.get_field("tag").rel.to
if self.use_gfk:
tagged_items = GenericRelation(self.through)
tagged_items.contribute_to_class(cls, "tagged_items")
def save_form_data(self, instance, value):
getattr(instance, self.name).set(*value)
def formfield(self, form_class=TagField, **kwargs):
defaults = {
"label": capfirst(self.verbose_name),
"help_text": self.help_text,
"required": not self.blank
}
defaults.update(kwargs)
return form_class(**defaults)
def value_from_object(self, instance):
if instance.pk:
return self.through.objects.filter(**self.through.lookup_kwargs(instance))
return self.through.objects.none()
def related_query_name(self):
return self.model._meta.module_name
def m2m_reverse_name(self):
return self.through._meta.get_field_by_name("tag")[0].column
def m2m_target_field_name(self):
return self.model._meta.pk.name
def m2m_reverse_target_field_name(self):
return self.rel.to._meta.pk.name
def m2m_column_name(self):
if self.use_gfk:
return self.through._meta.virtual_fields[0].fk_field
return self.through._meta.get_field('content_object').column
def db_type(self, connection=None):
return None
def m2m_db_table(self):
return self.through._meta.db_table
def extra_filters(self, pieces, pos, negate):
if negate or not self.use_gfk:
return []
prefix = "__".join(["tagged_items"] + pieces[:pos-2])
cts = map(ContentType.objects.get_for_model, _get_subclasses(self.model))
if len(cts) == 1:
return [("%s__content_type" % prefix, cts[0])]
return [("%s__content_type__in" % prefix, cts)]
def bulk_related_objects(self, new_objs, using):
return []
class _TaggableManager(models.Manager):
def __init__(self, through, model, instance):
self.through = through
self.model = model
self.instance = instance
def get_query_set(self):
return self.through.tags_for(self.model, self.instance)
def _lookup_kwargs(self):
return self.through.lookup_kwargs(self.instance)
@require_instance_manager
def add(self, *tags):
str_tags = set([
t
for t in tags
if not isinstance(t, self.through.tag_model())
])
tag_objs = set(tags) - str_tags
if str_tags:
q = models.Q()
for str_tag in str_tags:
q |= models.Q(name__iexact=str_tag)
existing = self.through.tag_model().objects.filter(q)
tag_objs.update(existing)
existing_low = [t.name.lower() for t in existing]
new_tags = [t for t in str_tags if t.lower() not in existing_low]
for new_tag in new_tags:
tag_objs.add(self.through.tag_model().objects.create(name=new_tag))
for tag in tag_objs:
self.through.objects.get_or_create(tag=tag, **self._lookup_kwargs())
@require_instance_manager
def set(self, *tags):
self.clear()
self.add(*tags)
@require_instance_manager
def remove(self, *tags):
q = models.Q()
for tag in tags:
q |= models.Q(tag__name__iexact=tag)
self.through.objects.filter(**self._lookup_kwargs()).filter(
q).delete()
@require_instance_manager
def clear(self):
self.through.objects.filter(**self._lookup_kwargs()).delete()
def most_common(self):
return self.get_query_set().annotate(
num_times=models.Count(self.through.tag_relname())
).order_by('-num_times')
@require_instance_manager
def similar_objects(self):
lookup_kwargs = self._lookup_kwargs()
lookup_keys = sorted(lookup_kwargs)
qs = self.through.objects.values(*lookup_kwargs.keys())
qs = qs.annotate(n=models.Count('pk'))
qs = qs.exclude(**lookup_kwargs)
qs = qs.filter(tag__in=self.all())
qs = qs.order_by('-n')
items = {}
if len(lookup_keys) == 1:
f = self.through._meta.get_field_by_name(lookup_keys[0])[0]
objs = f.rel.to._default_manager.filter(**{
"%s__in" % f.rel.field_name: [r["content_object"] for r in qs]
})
for obj in objs:
items[(getattr(obj, f.rel.field_name),)] = obj
else:
preload = {}
for result in qs:
preload.setdefault(result['content_type'], set())
preload[result["content_type"]].add(result["object_id"])
for ct, obj_ids in preload.iteritems():
ct = ContentType.objects.get_for_id(ct)
for obj in ct.model_class()._default_manager.filter(pk__in=obj_ids):
items[(ct.pk, obj.pk)] = obj
results = []
for result in qs:
obj = items[
tuple(result[k] for k in lookup_keys)
]
obj.similar_tags = result["n"]
results.append(obj)
return results
def _get_subclasses(model):
subclasses = [model]
for f in model._meta.get_all_field_names():
field = model._meta.get_field_by_name(f)[0]
if (isinstance(field, RelatedObject) and
getattr(field.field.rel, "parent_link", None)):
subclasses.extend(_get_subclasses(field.model))
return subclasses
| true
| true
|
1c46258e69edc1d51b3b465582f6145ad636ebc5
| 813
|
py
|
Python
|
Controller/countryStatisticsHashedUserIdsController.py
|
lionick/map-ip-to-country
|
ccc44b511b7cf1451849038bae66e682140a68a9
|
[
"Apache-2.0"
] | null | null | null |
Controller/countryStatisticsHashedUserIdsController.py
|
lionick/map-ip-to-country
|
ccc44b511b7cf1451849038bae66e682140a68a9
|
[
"Apache-2.0"
] | null | null | null |
Controller/countryStatisticsHashedUserIdsController.py
|
lionick/map-ip-to-country
|
ccc44b511b7cf1451849038bae66e682140a68a9
|
[
"Apache-2.0"
] | 1
|
2021-03-16T11:07:22.000Z
|
2021-03-16T11:07:22.000Z
|
from datetime import date, timedelta
from Model.ipStatistics import ipStatistics
from Model.countryStatisticsHashedUserId import countryStatisticsHashedUserId
from datetime import datetime, timedelta
class countryStatisticsHashedUserIdsController:
@classmethod
def getDataNotMapped(self):
dateFrom = countryStatisticsHashedUserId.getLastDate()
        # we don't have any country statistics saved yet
        if dateFrom[0][0] is None:
result = ipStatistics.getAllIpStatistics()
else:
dayAfter = dateFrom[0][0] + timedelta(days=1)
dayFrom = dayAfter.strftime('%Y-%m-%d 00:00:00')
yesterday = date.today() - timedelta(days=1)
dateTo = yesterday.strftime('%Y-%m-%d 23:59:59')
result = ipStatistics.getIpStatisticsByDate(dayFrom, dateTo)
return result
| 33.875
| 77
| 0.723247
|
from datetime import date, timedelta
from Model.ipStatistics import ipStatistics
from Model.countryStatisticsHashedUserId import countryStatisticsHashedUserId
from datetime import datetime, timedelta
class countryStatisticsHashedUserIdsController:
@classmethod
def getDataNotMapped(self):
dateFrom = countryStatisticsHashedUserId.getLastDate()
        if dateFrom[0][0] is None:
result = ipStatistics.getAllIpStatistics()
else:
dayAfter = dateFrom[0][0] + timedelta(days=1)
dayFrom = dayAfter.strftime('%Y-%m-%d 00:00:00')
yesterday = date.today() - timedelta(days=1)
dateTo = yesterday.strftime('%Y-%m-%d 23:59:59')
result = ipStatistics.getIpStatisticsByDate(dayFrom, dateTo)
return result
| true
| true
|
1c46261cd54386528b25cc006d779402084d8229
| 484
|
py
|
Python
|
PyPark/version.py
|
liuzhuogood/PyPark
|
e605502344a3bfcc7696ba56f193fd50d773f1ea
|
[
"Apache-2.0"
] | 1
|
2021-11-16T10:33:01.000Z
|
2021-11-16T10:33:01.000Z
|
PyPark/version.py
|
liuzhuogood/PyPark
|
e605502344a3bfcc7696ba56f193fd50d773f1ea
|
[
"Apache-2.0"
] | null | null | null |
PyPark/version.py
|
liuzhuogood/PyPark
|
e605502344a3bfcc7696ba56f193fd50d773f1ea
|
[
"Apache-2.0"
] | null | null | null |
import logging
from PyPark.util.zk_util import path_join
def print_infos(pk):
for u in pk.rest.services.keys():
pk.log.info(f"Rest Service : /{path_join(pk.rest_base_url, u)}")
if len(pk.rest.services.keys()) > 0:
logging.info(f"Started By [{pk.group}] http://{pk.ip}:{pk.port}")
if pk.nat_port:
logging.info(f"Started By [NAT] http://{pk.nat_ip}:{pk.nat_port}")
if pk.debug:
logging.warning(f"Debug Enable Address:{pk.debug_host}")
| 32.266667
| 74
| 0.646694
|
import logging
from PyPark.util.zk_util import path_join
def print_infos(pk):
for u in pk.rest.services.keys():
pk.log.info(f"Rest Service : /{path_join(pk.rest_base_url, u)}")
if len(pk.rest.services.keys()) > 0:
logging.info(f"Started By [{pk.group}] http://{pk.ip}:{pk.port}")
if pk.nat_port:
logging.info(f"Started By [NAT] http://{pk.nat_ip}:{pk.nat_port}")
if pk.debug:
logging.warning(f"Debug Enable Address:{pk.debug_host}")
| true
| true
|
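print_infos above only reads a few attributes from the pk object, so the logging pattern can be tried without PyPark by handing it a SimpleNamespace stand-in. The sketch below replaces PyPark's path_join with a plain string join, and every attribute value is made up for the demo:

import logging
from types import SimpleNamespace

logging.basicConfig(level=logging.INFO)

def print_infos_demo(pk):
    # Same structure as print_infos above, with zk_util.path_join swapped
    # for a simple join of the base URL and the service path.
    for u in pk.rest.services.keys():
        pk.log.info(f"Rest Service : /{pk.rest_base_url}/{u}")
    if len(pk.rest.services.keys()) > 0:
        logging.info(f"Started By [{pk.group}] http://{pk.ip}:{pk.port}")

pk = SimpleNamespace(
    rest=SimpleNamespace(services={"echo": None, "status": None}),
    rest_base_url="api",
    log=logging.getLogger("pypark-demo"),
    group="demo-group",
    ip="127.0.0.1",
    port=8080,
)
print_infos_demo(pk)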
1c4626a4a0981b699bd3f0e091123348bc6f9ecc
| 1,097
|
py
|
Python
|
python/ConvertDocx2HtmlUsingWord.py
|
netchira/netchira.github.io
|
bed7b1425fe0ec206887be9cf48a571afbded9e8
|
[
"CC0-1.0"
] | 6
|
2019-09-25T06:43:01.000Z
|
2022-03-11T02:54:47.000Z
|
python/ConvertDocx2HtmlUsingWord.py
|
netchira/netchira.github.io
|
bed7b1425fe0ec206887be9cf48a571afbded9e8
|
[
"CC0-1.0"
] | 6
|
2019-01-06T07:35:10.000Z
|
2022-02-26T03:46:28.000Z
|
python/ConvertDocx2HtmlUsingWord.py
|
netchira/netchira.github.io
|
bed7b1425fe0ec206887be9cf48a571afbded9e8
|
[
"CC0-1.0"
] | 7
|
2021-05-14T07:04:36.000Z
|
2022-03-20T18:23:28.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 26 21:28:35 2019
Spyder editor
For Python ver 2.7
@author: netchira
"""
def ConvertDocx2HtmlUsingWord(DocxFilePath):
    import win32com.client
    import os
    # Check the file extension
    if os.path.exists(DocxFilePath) and (DocxFilePath[-5:] == ".docx"):
        # Strip the extension (5 characters including the period) from the file path
        str_FilePathNoExt = DocxFilePath[0:-5]
        # Append ".htm" as the output file extension
        str_HtmlFilePath = str_FilePathNoExt + ".htm"
        # Build the absolute output file path
        HtmlFilePath = os.path.abspath(str_HtmlFilePath)
    else:
        raise UserWarning("File Format is not .docx")
    # Start Word: create the Application object
    Application = win32com.client.Dispatch("Word.Application")
    # Show the Word window: set the Visible property to True
    Application.Visible = True
    # Open the existing document
    doc = Application.Documents.Open(DocxFilePath)
    # Save As: use the [Web Page, Filtered] file format
    WdFormatHTML = 8
    WdFormatFilteredHTML = 10
    doc.SaveAs2(HtmlFilePath, FileFormat=WdFormatFilteredHTML)
    # Close the document
    doc.Close()
    # Quit Word: call the Quit method
    Application.Quit()
| 26.119048
| 71
| 0.678213
|
def ConvertDocx2HtmlUsingWord(DocxFilePath):
import win32com.client
import os
if os.path.exists(DocxFilePath) and (DocxFilePath[-5:] == ".docx"):
str_FilePathNoExt = DocxFilePath[0:-5]
str_HtmlFilePath = str_FilePathNoExt + ".htm"
HtmlFilePath = os.path.abspath(str_HtmlFilePath)
else:
raise UserWarning("File Format is not .docx")
Application = win32com.client.Dispatch("Word.Application")
Application.Visible = True
doc = Application.Documents.Open(DocxFilePath)
WdFormatHTML = 8
WdFormatFilteredHTML = 10
doc.SaveAs2(HtmlFilePath, FileFormat=WdFormatFilteredHTML)
doc.Close()
Application.Quit()
| true
| true
|
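ConvertDocx2HtmlUsingWord above needs pywin32 and a local Microsoft Word installation, but its path handling is plain Python. A hedged sketch of just the extension check and the ".htm" substitution; docx_to_htm_path is a hypothetical helper, not part of the original script:

import os

def docx_to_htm_path(DocxFilePath):
    # Mirrors the validation and ".htm" substitution performed at the top
    # of ConvertDocx2HtmlUsingWord, without touching Word itself.
    if not (os.path.exists(DocxFilePath) and DocxFilePath[-5:] == ".docx"):
        raise UserWarning("File Format is not .docx")
    return os.path.abspath(DocxFilePath[0:-5] + ".htm")

# Example (the path is hypothetical and must exist for the check to pass):
# docx_to_htm_path(r"C:\docs\report.docx")  ->  r"C:\docs\report.htm"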
1c4627682d3ef50f786fa60721404010b28e5f2d
| 2,151
|
py
|
Python
|
misp/utils/wsi_utils.py
|
zhoudaxia233/misp
|
c0d36e3f1a1eeac417d6bfff015ea5430f1d0de5
|
[
"MIT"
] | 2
|
2019-12-21T10:46:57.000Z
|
2019-12-22T14:01:23.000Z
|
misp/utils/wsi_utils.py
|
zhoudaxia233/misp
|
c0d36e3f1a1eeac417d6bfff015ea5430f1d0de5
|
[
"MIT"
] | null | null | null |
misp/utils/wsi_utils.py
|
zhoudaxia233/misp
|
c0d36e3f1a1eeac417d6bfff015ea5430f1d0de5
|
[
"MIT"
] | null | null | null |
import os
import openslide
from openslide.deepzoom import DeepZoomGenerator
from tqdm import tqdm
__all__ = ['WSI', 'validate_mpp', 'stitch_tiles']
class WSI():
def __init__(self, path: str, tile_size: int = 224):
self.path = path
self.tile_size = tile_size
self.slide = openslide.OpenSlide(self.path)
self.deepzoom_gen = DeepZoomGenerator(self.slide, tile_size=self.tile_size, overlap=0, limit_bounds=False)
self.mpp = (float(self.slide.properties[openslide.PROPERTY_NAME_MPP_X]),
float(self.slide.properties[openslide.PROPERTY_NAME_MPP_Y]))
self.level_count = self.deepzoom_gen.level_count
self.level_dimensions = self.deepzoom_gen.level_dimensions
self.level_tiles = self.deepzoom_gen.level_tiles
def get_info(self):
"""Get basic information of the wsi.
"""
print(f'Num of levels: {self.level_count}')
print('Dimensions of levels:')
for level_nr in range(self.level_count):
print(f'- level {level_nr}: {self.level_dimensions[level_nr]}')
print(f'MPP: {self.mpp}')
print()
def get_tile(self, row: int, col: int):
return self.deepzoom_gen.get_tile(level=self.level_count - 1, address=(col, row))
def make_tiles(self, tiles_folder_path: str):
if not os.path.isdir(tiles_folder_path):
os.mkdir(tiles_folder_path)
level = self.level_count - 1
cols, rows = self.level_tiles[level]
for row in tqdm(range(rows)):
for col in range(cols):
self.get_tile(row, col).save(tiles_folder_path + f'{row}_{col}.tif')
return
def validate_mpp(path: str, mpp: float = 0.5, thresh: float = 0.015) -> bool:
"""Validate if the input WSI has the same MPP as our training data.
"""
wsi = openslide.OpenSlide(path)
x_diff = float(wsi.properties[openslide.PROPERTY_NAME_MPP_X]) - mpp
y_diff = float(wsi.properties[openslide.PROPERTY_NAME_MPP_Y]) - mpp
return (abs(x_diff) < thresh) and (abs(y_diff) < thresh)
def stitch_tiles(patches_folder_path: str, target_folder_path: str):
pass
| 38.410714
| 114
| 0.666202
|
import os
import openslide
from openslide.deepzoom import DeepZoomGenerator
from tqdm import tqdm
__all__ = ['WSI', 'validate_mpp', 'stitch_tiles']
class WSI():
def __init__(self, path: str, tile_size: int = 224):
self.path = path
self.tile_size = tile_size
self.slide = openslide.OpenSlide(self.path)
self.deepzoom_gen = DeepZoomGenerator(self.slide, tile_size=self.tile_size, overlap=0, limit_bounds=False)
self.mpp = (float(self.slide.properties[openslide.PROPERTY_NAME_MPP_X]),
float(self.slide.properties[openslide.PROPERTY_NAME_MPP_Y]))
self.level_count = self.deepzoom_gen.level_count
self.level_dimensions = self.deepzoom_gen.level_dimensions
self.level_tiles = self.deepzoom_gen.level_tiles
def get_info(self):
print(f'Num of levels: {self.level_count}')
print('Dimensions of levels:')
for level_nr in range(self.level_count):
print(f'- level {level_nr}: {self.level_dimensions[level_nr]}')
print(f'MPP: {self.mpp}')
print()
def get_tile(self, row: int, col: int):
return self.deepzoom_gen.get_tile(level=self.level_count - 1, address=(col, row))
def make_tiles(self, tiles_folder_path: str):
if not os.path.isdir(tiles_folder_path):
os.mkdir(tiles_folder_path)
level = self.level_count - 1
cols, rows = self.level_tiles[level]
for row in tqdm(range(rows)):
for col in range(cols):
self.get_tile(row, col).save(tiles_folder_path + f'{row}_{col}.tif')
return
def validate_mpp(path: str, mpp: float = 0.5, thresh: float = 0.015) -> bool:
wsi = openslide.OpenSlide(path)
x_diff = float(wsi.properties[openslide.PROPERTY_NAME_MPP_X]) - mpp
y_diff = float(wsi.properties[openslide.PROPERTY_NAME_MPP_Y]) - mpp
return (abs(x_diff) < thresh) and (abs(y_diff) < thresh)
def stitch_tiles(patches_folder_path: str, target_folder_path: str):
pass
| true
| true
|
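validate_mpp above requires OpenSlide and a real slide file, but the tolerance test it applies is a plain numeric comparison. A dependency-free sketch of that check; mpp_within_tolerance and the sample values are illustrative only:

def mpp_within_tolerance(mpp_x, mpp_y, target=0.5, thresh=0.015):
    # Same comparison as validate_mpp above, applied to microns-per-pixel
    # values that have already been read out of the slide properties.
    return (abs(mpp_x - target) < thresh) and (abs(mpp_y - target) < thresh)

print(mpp_within_tolerance(0.504, 0.498))  # True: both within 0.015 of 0.5
print(mpp_within_tolerance(0.25, 0.25))    # False: outside the tolerance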
1c4627bc2af1de74f5fa845dc646606e7fc21076
| 110,473
|
py
|
Python
|
ds_discovery/sample/map_companies_fortune1000.py
|
project-hadron/discovery-transition-ds
|
08229ca3b7617b42ce2dd8e47ff93876c0843810
|
[
"BSD-3-Clause"
] | 2
|
2020-09-21T17:24:16.000Z
|
2021-05-28T18:02:54.000Z
|
ds_discovery/sample/map_companies_fortune1000.py
|
project-hadron/discovery-transition-ds
|
08229ca3b7617b42ce2dd8e47ff93876c0843810
|
[
"BSD-3-Clause"
] | null | null | null |
ds_discovery/sample/map_companies_fortune1000.py
|
project-hadron/discovery-transition-ds
|
08229ca3b7617b42ce2dd8e47ff93876c0843810
|
[
"BSD-3-Clause"
] | 1
|
2021-07-23T13:52:04.000Z
|
2021-07-23T13:52:04.000Z
|
data={'title': ['Walmart', 'Exxon Mobil', 'Berkshire Hathaway', 'Apple', 'UnitedHealth Group', 'McKesson', 'CVS Health', 'Amazon.com', 'AT&T', 'General Motors', 'Ford Motor', 'AmerisourceBergen', 'Chevron', 'Cardinal Health', 'Costco', 'Verizon', 'Kroger', 'General Electric', 'Walgreens Boots Alliance', 'JPMorgan Chase', 'Fannie Mae', 'Alphabet', 'Home Depot', 'Bank of America Corp.', 'Express Scripts Holding', 'Wells Fargo', 'Boeing', 'Phillips 66', 'Anthem', 'Microsoft', 'Valero Energy', 'Citigroup', 'Comcast', 'IBM', 'Dell Technologies', 'State Farm Insurance Cos.', 'Johnson & Johnson', 'Freddie Mac', 'Target', 'Lowes', 'Marathon Petroleum', 'Procter & Gamble', 'MetLife', 'UPS', 'PepsiCo', 'Intel', 'DowDuPont', 'Archer Daniels Midland', 'Aetna', 'FedEx', 'United Technologies', 'Prudential Financial', 'Albertsons Cos.', 'Sysco', 'Disney', 'Humana', 'Pfizer', 'HP', 'Lockheed Martin', 'AIG', 'Centene', 'Cisco Systems', 'HCA Healthcare', 'Energy Transfer Equity', 'Caterpillar', 'Nationwide', 'Morgan Stanley', 'Liberty Mutual Insurance Group', 'New York Life Insurance', 'Goldman Sachs Group', 'American Airlines Group', 'Best Buy', 'Cigna', 'Charter Communications', 'Delta Air Lines', 'Facebook', 'Honeywell International', 'Merck', 'Allstate', 'Tyson Foods', 'United Continental Holdings', 'Oracle', 'Tech Data', 'TIAA', 'TJX', 'American Express', 'Coca-Cola', 'Publix Super Markets', 'Nike', 'Andeavor', 'World Fuel Services', 'Exelon', 'Massachusetts Mutual Life Insurance', 'Rite Aid', 'ConocoPhillips', 'CHS', '3M', 'Time Warner', 'General Dynamics', 'USAA', 'Capital One Financial', 'Deere', 'INTL FCStone', 'Northwestern Mutual', 'Enterprise Products Partners', 'Travelers Cos.', 'Hewlett Packard Enterprise', 'Philip Morris International', 'Twenty-First Century Fox', 'AbbVie', 'Abbott Laboratories', 'Progressive', 'Arrow Electronics', 'Kraft Heinz', 'Plains GP Holdings', 'Gilead Sciences', 'Mondelez International', 'Northrop Grumman', 'Raytheon', 'Macys', 'US Foods Holding', 'U.S. Bancorp', 'Dollar General', 'International Paper', 'Duke Energy', 'Southern', 'Marriott International', 'Avnet', 'Eli Lilly', 'Amgen', 'McDonalds', 'Starbucks', 'Qualcomm', 'Dollar Tree', 'PBF Energy', 'Icahn Enterprises', 'Aflac', 'AutoNation', 'Penske Automotive Group', 'Whirlpool', 'Union Pacific', 'Southwest Airlines', 'ManpowerGroup', 'Thermo Fisher Scientific', 'Bristol-Myers Squibb', 'Halliburton', 'Tenet Healthcare', 'Lear', 'Cummins', 'Micron Technology', 'Nucor', 'Molina Healthcare', 'Fluor', 'Altria Group', 'Paccar', 'Hartford Financial Services', 'Kohls', 'Western Digital', 'Jabil', 'Community Health Systems', 'Visa', 'Danaher', 'Kimberly-Clark', 'AECOM', 'PNC Financial Services', 'CenturyLink', 'NextEra Energy', 'PG& E Corp.', 'Synnex', 'WellCare Health Plans', 'Performance Food Group', 'Sears Holdings', 'Synchrony Financial', 'CarMax', 'Bank of New York Mellon', 'Freeport-McMoRan', 'Genuine Parts', 'Emerson Electric', 'DaVita', 'Supervalu', 'Gap', 'General Mills', 'Nordstrom', 'Colgate-Palmolive', 'American Electric Power', 'XPO Logistics', 'Goodyear Tire & Rubber', 'Omnicom Group', 'CDW', 'Sherwin-Williams', 'PPG Industries', 'Texas Instruments', 'C.H. Robinson Worldwide', 'WestRock', 'Cognizant Technology Solutions', 'Newell Brands', 'CBS', 'Envision Healthcare', 'Monsanto', 'Aramark', 'Applied Materials', 'Waste Management', 'DISH Network', 'Illinois Tool Works', 'Lincoln National', 'HollyFrontier', 'CBRE Group', 'Textron', 'Ross Stores', 'Principal Financial', 'D.R. 
Horton', 'Marsh & McLennan', 'Devon Energy', 'AES', 'Ecolab', "Land O'Lakes", 'Loews', 'Kinder Morgan', 'FirstEnergy', 'Occidental Petroleum', 'Viacom', 'PayPal Holdings', 'NGL Energy Partners', 'Celgene', 'Arconic', 'Kellogg', 'Las Vegas Sands', 'Stanley Black & Decker', 'Booking Holdings', 'Lennar', 'L Brands', 'DTE Energy', 'Dominion Energy', 'Reinsurance Group of America', 'J.C. Penney', 'Mastercard', 'BlackRock', 'Henry Schein', 'Guardian Life Ins. Co. of America', 'Stryker', 'Jefferies Financial Group', 'VF', 'ADP', 'Edison International', 'Biogen', 'United States Steel', 'Core-Mark Holding', 'Bed Bath & Beyond', 'Oneok', 'BB& T Corp.', 'Becton Dickinson', 'Ameriprise Financial', 'Farmers Insurance Exchange', 'First Data', 'Consolidated Edison', 'Parker-Hannifin', 'Anadarko Petroleum', 'Estee Lauder', 'State Street Corp.', 'Tesla', 'Netflix', 'Alcoa', 'Discover Financial Services', 'Praxair', 'CSX', 'Xcel Energy', 'Unum Group', 'Universal Health Services', 'NRG Energy', 'EOG Resources', 'Sempra Energy', "Toys 'R'ù Us", 'Group 1 Automotive', 'Entergy', 'Molson Coors Brewing', 'L3 Technologies', 'Ball', 'AutoZone', 'Murphy USA', 'MGM Resorts International', 'Office Depot', 'Huntsman', 'Baxter International', 'Norfolk Southern', 'salesforce.com', 'Laboratory Corp. of America', 'W.W. Grainger', 'Qurate Retail', 'Autoliv', 'Live Nation Entertainment', 'Xerox', 'Leidos Holdings', 'Corning', 'Lithia Motors', 'Expedia Group', 'Republic Services', 'Jacobs Engineering Group', 'Sonic Automotive', 'Ally Financial', 'LKQ', 'BorgWarner', 'Fidelity National Financial', 'SunTrust Banks', 'IQVIA Holdings', 'Reliance Steel & Aluminum', 'Nvidia', 'Voya Financial', 'CenterPoint Energy', 'eBay', 'Eastman Chemical', 'American Family Insurance Group', 'Steel Dynamics', 'Pacific Life', 'Chesapeake Energy', 'Mohawk Industries', 'Quanta Services', 'Advance Auto Parts', 'Owens & Minor', 'United Natural Foods', 'Tenneco', 'Conagra Brands', 'GameStop', 'Hormel Foods', 'Hilton Worldwide Holdings', 'Frontier Communications', 'Fidelity National Information Services', 'Public Service Enterprise Group', 'Boston Scientific', 'OReilly Automotive', 'Charles Schwab', 'Global Partners', 'PVH', 'Avis Budget Group', 'Targa Resources', 'Hertz Global Holdings', 'Calpine', 'Mutual of Omaha Insurance', 'Crown Holdings', 'Peter Kiewit Sons', 'Dicks Sporting Goods', 'PulteGroup', 'Navistar International', 'Thrivent Financial for Lutherans', 'DCP Midstream', 'Air Products & Chemicals', 'Veritiv', 'AGCO', 'Genworth Financial', 'Univar', 'News Corp.', 'SpartanNash', 'Westlake Chemical', 'Williams', 'Lam Research', 'Alaska Air Group', 'Jones Lang LaSalle', 'Anixter International', 'Campbell Soup', 'Interpublic Group', 'Dover', 'Zimmer Biomet Holdings', 'Dean Foods', 'Foot Locker', 'Eversource Energy', 'Alliance Data Systems', 'Fifth Third Bancorp', 'Quest Diagnostics', 'EMCOR Group', 'W.R. Berkley', 'WESCO International', 'Coty', 'WEC Energy Group', 'Masco', 'DXC Technology', 'Auto-Owners Insurance', 'Jones Financial (Edward Jones)', 'Liberty Media', 'Erie Insurance Group', 'Hershey', 'PPL', 'Huntington Ingalls Industries', 'Mosaic', 'J.M. Smucker', 'Delek US Holdings', 'Newmont Mining', 'Constellation Brands', 'Ryder System', 'National Oilwell Varco', 'Adobe Systems', 'LifePoint Health', 'Tractor Supply', 'Thor Industries', 'Dana', 'Weyerhaeuser', 'J.B. 
Hunt Transport Services', 'Darden Restaurants', 'Yum China Holdings', 'Blackstone Group', 'Berry Global Group', 'Builders FirstSource', 'Activision Blizzard', 'JetBlue Airways', 'Amphenol', 'A-Mark Precious Metals', 'Spirit AeroSystems Holdings', 'R.R. Donnelley & Sons', 'Harris', 'Expeditors Intl. of Washington', 'Discovery', 'Owens-Illinois', 'Sanmina', 'KeyCorp', 'American Financial Group', 'Oshkosh', 'Rockwell Collins', 'Kindred Healthcare', 'Insight Enterprises', 'Dr Pepper Snapple Group', 'American Tower', 'Fortive', 'Ralph Lauren', 'HRG Group', 'Ascena Retail Group', 'United Rentals', 'Caseys General Stores', 'Graybar Electric', 'Avery Dennison', 'MasTec', 'CMS Energy', 'HD Supply Holdings', 'Raymond James Financial', 'NCR', 'Hanesbrands', 'Asbury Automotive Group', 'Citizens Financial Group', 'Packaging Corp. of America', 'Alleghany', 'Apache', 'Dillards', 'Assurant', 'Franklin Resources', 'Owens Corning', 'Motorola Solutions', 'NVR', 'Rockwell Automation', 'TreeHouse Foods', 'Wynn Resorts', 'Olin', 'American Axle & Manufacturing', 'Old Republic International', 'Chemours', 'iHeartMedia', 'Ameren', 'Arthur J. Gallagher', 'Celanese', 'Sealed Air', 'UGI', 'Realogy Holdings', 'Burlington Stores', 'Regions Financial', 'AK Steel Holding', 'Securian Financial Group', 'S& P Global', 'Markel', 'TravelCenters of America', 'Conduent', 'M& T Bank Corp.', 'Clorox', 'AmTrust Financial Services', 'KKR', 'Ulta Beauty', 'Yum Brands', 'Regeneron Pharmaceuticals', 'Windstream Holdings', 'Magellan Health', 'Western & Southern Financial', 'Intercontinental Exchange', 'Ingredion', 'Wyndham Destinations', 'Toll Brothers', 'Seaboard', 'Booz Allen Hamilton', 'First American Financial', 'Cincinnati Financial', 'Avon Products', 'Northern Trust', 'Fiserv', 'Harley-Davidson', 'Cheniere Energy', 'Patterson', 'Peabody Energy', 'ON Semiconductor', 'Simon Property Group', 'Western Union', 'NetApp', 'Polaris Industries', 'Pioneer Natural Resources', 'ABM Industries', 'Vistra Energy', 'Cintas', 'Hess', 'Host Hotels & Resorts', 'Kelly Services', 'Genesis Healthcare', 'Michaels Cos.', 'Advanced Micro Devices', 'Zoetis', 'Williams-Sonoma', 'Fortune Brands Home & Security', 'Big Lots', 'Robert Half International', 'Post Holdings', 'Hasbro', 'Hanover Insurance Group', 'Navient', 'Intuit', 'Domtar', 'Marathon Oil', 'Cerner', 'Analog Devices', 'Telephone & Data Systems', 'Essendant', 'Sonoco Products', 'Juniper Networks', 'Commercial Metals', 'CSRA', 'Under Armour', 'RPM International', 'Total System Services', 'Levi Strauss', 'Brunswick', 'YRC Worldwide', 'Mattel', 'FM Global', 'NiSource', 'Caesars Entertainment', 'Electronic Arts', 'Dynegy', 'McCormick', 'T. 
Rowe Price', 'Orbital ATK', 'Tutor Perini', 'Brookdale Senior Living', 'Huntington Bancshares', 'Wayfair', 'Rush Enterprises', 'Xylem', 'Neiman Marcus Group', 'Hyatt Hotels', 'Sprouts Farmers Market', 'Diebold Nixdorf', 'Roper Technologies', 'Smart & Final Stores', 'CommScope Holding', 'Tapestry', 'Diplomat Pharmacy', 'Chipotle Mexican Grill', 'Agilent Technologies', 'Science Applications International', 'MDU Resources Group', 'Select Medical Holdings', 'Boise Cascade', 'National General Holdings', 'SCANA', 'Graphic Packaging Holding', 'Fastenal', 'Schneider National', 'Laureate Education', 'Beacon Roofing Supply', 'KB Home', 'Equinix', 'Terex', 'Crown Castle International', 'CACI International', 'Watsco', 'Coca-Cola Bottling', 'Welltower', 'ADT', 'Ametek', 'CNO Financial Group', 'Camping World Holdings', 'LPL Financial Holdings', 'Noble Energy', 'Bloomin Brands', 'Moodys', 'Symantec', 'Amkor Technology', 'Skechers U.S.A.', 'KBR', 'Tiffany', 'Torchmark', 'Broadridge Financial Solutions', 'Quad/Graphics', 'CF Industries Holdings', 'Carlisle', 'Silgan Holdings', 'Bemis', 'CA', 'Hub Group', 'Worldpay', 'Ingles Markets', 'Snap-on', 'Dentsply Sirona', 'Calumet Specialty Products', 'Global Payments', 'Encompass Health', 'Martin Marietta Materials', 'Nasdaq', 'Leggett & Platt', 'Universal Forest Products', 'Sally Beauty Holdings', 'Flowers Foods', 'Barnes & Noble', 'American Equity Investment Life', 'Vulcan Materials', 'Taylor Morrison Home', 'Westinghouse Air Brake', 'Crestwood Equity Partners', 'Iron Mountain', 'Lennox International', 'General Cable', 'American Eagle Outfitters', 'Church & Dwight', 'Platform Specialty Products', 'JELD-WEN Holding', 'OneMain Holdings', 'Colfax', 'Zebra Technologies', 'Andersons', 'TD Ameritrade Holding', 'Carlyle Group', 'Hubbell', 'Trinity Industries', 'Darling Ingredients', 'Flowserve', 'Antero Resources', 'Skyworks Solutions', 'Landstar System', 'Buckeye Partners', 'MRC Global', 'CME Group', 'Greif', 'Nexeo Solutions', 'Cooper-Standard Holdings', 'Urban Outfitters', 'LSC Communications', 'Sabre', 'Green Plains', 'Hexion', 'Stericycle', 'Warner Music Group', 'Ventas', 'ScanSource', 'Pinnacle West Capital', 'Scripps Networks Interactive', 'Alexion Pharmaceuticals', 'Pitney Bowes', 'CIT Group', 'Country Financial', 'CUNA Mutual Group', 'Triumph Group', 'TransDigm Group', 'Allegheny Technologies', 'Resolute Forest Products', 'Acuity Brands', 'Abercrombie & Fitch', 'KLA-Tencor', 'Weis Markets', 'Puget Energy', 'Mednax', 'Kar Auction Services', 'PolyOne', 'FMC', 'Edwards Lifesciences', 'Microchip Technology', 'Amerco', 'Mercury General', 'American National Insurance', 'Carters', 'International Flavors & Fragrances', 'Aarons', 'Alliant Energy', 'EQT', 'Monster Beverage', 'BMC Stock Holdings', 'Ryerson Holding', 'Equifax', 'Regal Beloit', 'Old Dominion Freight Line', 'American Water Works', 'BGC Partners', 'Brinks', 'Meritor', 'Sentry Insurance Group', 'Sanderson Farms', 'KapStone Paper & Packaging', 'Gartner', 'IAC/InterActiveCorp', 'Tailored Brands', 'WABCO Holdings', 'Insperity', 'Comerica', 'TriNet Group', 'Avaya Holdings', 'Ashland Global Holdings', 'Meritage Homes', 'SkyWest', 'USG', 'Southwestern Energy', 'Keysight Technologies', 'Regal Entertainment Group', 'Mutual of America Life Insurance', 'Paychex', 'Brinker International', 'Penn National Gaming', 'Gannett', 'Visteon', 'Pinnacle Foods', 'Intuitive Surgical', 'Continental Resources', 'Service Corp. 
International', 'Scientific Games', 'Albemarle', 'Atmos Energy', 'Hologic', 'H& R Block', 'Qorvo', 'Steelcase', 'Univision Communications', 'Worthington Industries', 'Timken', 'A.O. Smith', 'PriceSmart', 'Stifel Financial', 'Brown-Forman', 'Cinemark Holdings', 'Granite Construction', 'Dycom Industries', 'Clean Harbors', 'First Solar', 'Scotts Miracle-Gro', 'Cracker Barrel Old Country Store', 'Triple-S Management', 'First Republic Bank', 'ServiceMaster Global Holdings', 'PC Connection', 'Genesco', 'Medical Mutual of Ohio', 'MSC Industrial Direct', 'Legg Mason', 'Hyster-Yale Materials Handling', 'Apollo Global Management', 'Citrix Systems', 'Acadia Healthcare', 'Varian Medical Systems', 'Groupon', 'Aleris', 'Sprague Resources', 'Cooper Tire & Rubber', 'Hain Celestial Group', 'Penn Mutual Life Insurance', 'Colony NorthStar', 'ArcBest', 'Presidio', 'TRI Pointe Group', 'Annaly Capital Management', 'G-III Apparel Group', 'AMC Networks', 'Enable Midstream Partners', 'Ciena', 'DSW', 'Convergys', 'Park Hotels & Resorts', 'Pool', 'Fossil Group', 'Dominos Pizza', 'Crane', 'Caleres', 'Tempur Sealy International', 'Tetra Tech', 'Illumina', 'Valmont Industries', 'Hill-Rom Holdings', 'Unisys', 'Zions Bancorp.', 'Sinclair Broadcast Group', 'Louisiana-Pacific', 'Mettler-Toledo International', 'Synopsys', 'Kemper', 'Cabot', 'Great Plains Energy', 'Rent-A-Center', 'Hawaiian Holdings', 'Revlon', 'Syneos Health', 'Public Storage', 'TTM Technologies', 'Vectren', 'Trimble', 'NOW', 'Spirit Airlines', 'ASGN', 'Lincoln Electric Holdings', 'Prologis', 'Range Resources', 'Teledyne Technologies', 'Vishay Intertechnology', 'Boston Properties', 'Applied Industrial Technologies', 'Graham Holdings', 'Amica Mutual Insurance', 'Concho Resources', 'ITT', 'Kansas City Southern', 'MDC Holdings', 'Evergy', 'Pinnacle Entertainment', 'Hawaiian Electric Industries', 'TEGNA', 'Southwest Gas Holdings', 'Vista Outdoor', 'Bon-Ton Stores', 'Super Micro Computer', 'Plexus', 'TrueBlue', 'Magellan Midstream Partners', 'Toro', 'Akamai Technologies', 'Moog', 'Vertex Pharmaceuticals', 'Equity Residential', 'Selective Insurance Group', 'AptarGroup', 'Benchmark Electronics', 'Columbia Sportswear', 'A. Schulman', 'Verso', 'Digital Realty Trust', 'GNC Holdings', 'E*Trade Financial', 'Hovnanian Enterprises', 'Maximus', 'Twitter', 'Par Pacific Holdings', 'Parexel International', 'RH', 'Nexstar Media Group', 'Knight-Swift Transportation Holdings', 'Red Hat', 'Belden', 'Boyd Gaming', 'Primoris Services', 'Gardner Denver', 'Donaldson', 'Party City Holdco', 'J.Crew Group', 'EnerSys', 'Guess', 'Patterson-UTI Energy', 'WGL Holdings', 'Wolverine World Wide', 'Xilinx', 'Vornado Realty Trust', 'Middleby', 'MPM Holdings', 'Cleveland-Cliffs', 'GGP', 'Cypress Semiconductor', 'Arch Coal', 'GMS', 'Waters', 'H.B. 
Fuller', 'Affiliated Managers Group', 'PerkinElmer', 'Edgewell Personal Care', 'Maxim Integrated Products', 'Knights of Columbus', 'IDEX', 'DST Systems', 'Chicos FAS', 'Nu Skin Enterprises', 'Herman Miller', 'NLV Financial', 'Curtiss-Wright', 'New Jersey Resources', 'REV Group', 'Mueller Industries', 'GEO Group', 'Allison Transmission Holdings', 'OGE Energy', 'Cheesecake Factory', 'PRA Health Sciences', 'Tupperware Brands', 'Euronet Worldwide', 'FLEETCOR Technologies', 'Nationstar Mortgage Holdings', 'GoDaddy', 'Blackhawk Network Holdings', 'Cboe Global Markets', 'Snyders-Lance', 'Murphy Oil', 'CDK Global', 'Texas Roadhouse', 'Kirby', 'Square', 'Genesee & Wyoming', 'Zayo Group Holdings', 'NewMarket', '99 Cents Only Stores', 'PCM', 'Federated Mutual Insurance', 'HNI', 'Hospitality Properties Trust', 'Greenbrier Cos.', 'Bio-Rad Laboratories', 'AvalonBay Communities', 'Renewable Energy Group', 'Atlas Air Worldwide Holdings', 'Teradata', 'LCI Industries', 'Teleflex', 'Verisk Analytics', 'Popular', 'Workday', 'Cooper Cos.', 'Express', 'Teradyne', 'Werner Enterprises', 'Oaktree Capital Group', 'Woodward', 'F5 Networks', 'Valvoline', 'Roadrunner Transportation Systems', 'SemGroup', 'Catalent', 'Quorum Health', 'Universal', 'Nordson', 'ResMed', 'Tower International', 'Freds', 'Foundation Building Materials', 'Kennametal', 'Autodesk', 'Ply Gem Holdings', 'Central Garden & Pet', 'Matson', 'EchoStar', 'Genesis Energy', 'SVB Financial Group', 'Itron', 'Portland General Electric', 'California Resources', 'Esterline Technologies', 'Delta Tucker Holdings', 'AMN Healthcare Services', 'Griffon', 'Valhi', 'Hexcel', 'IDEXX Laboratories', 'Deluxe', 'M/I Homes', 'Kraton', 'Stewart Information Services', 'Marriott Vacations Worldwide', 'SPX FLOW', 'ACCO Brands', 'Echo Global Logistics', 'Cadence Design Systems', 'Nuance Communications', 'Finish Line', 'TransUnion', 'ServiceNow', 'Summit Materials', 'Engility Holdings', 'Ferrellgas Partners', 'Interactive Brokers Group', 'Stepan', 'Oceaneering International', 'Cimarex Energy', 'Rexnord', 'Beazer Homes USA', 'MKS Instruments', 'Vail Resorts', 'Ohio National Mutual', 'TopBuild', 'Brown & Brown', 'Aerojet Rocketdyne Holdings', 'Barnes & Noble Education', 'Superior Energy Services', 'VeriFone Systems', 'Childrens Place', 'Tribune Media', 'Healthcare Services Group', 'SiteOne Landscape Supply', 'Charles River Laboratories Intl', 'CoreLogic', 'Ensign Group', 'HCP'], 'Sector': ['Retailing', 'Energy', 'Financials', 'Technology', 'Health Care', 'Wholesalers', 'Health Care', 'Retailing', 'Telecommunications', 'Motor Vehicles & Parts', 'Motor Vehicles & Parts', 'Wholesalers', 'Energy', 'Wholesalers', 'Retailing', 'Telecommunications', 'Food & Drug Stores', 'Industrials', 'Food & Drug Stores', 'Financials', 'Financials', 'Technology', 'Retailing', 'Financials', 'Health Care', 'Financials', 'Aerospace & Defense', 'Energy', 'Health Care', 'Technology', 'Energy', 'Financials', 'Telecommunications', 'Technology', 'Technology', 'Financials', 'Health Care', 'Financials', 'Retailing', 'Retailing', 'Energy', 'Household Products', 'Financials', 'Transportation', 'Food, Beverages & Tobacco', 'Technology', 'Chemicals', 'Food, Beverages & Tobacco', 'Health Care', 'Transportation', 'Aerospace & Defense', 'Financials', 'Food & Drug Stores', 'Wholesalers', 'Media', 'Health Care', 'Health Care', 'Technology', 'Aerospace & Defense', 'Financials', 'Health Care', 'Technology', 'Health Care', 'Energy', 'Industrials', 'Financials', 'Financials', 'Financials', 'Financials', 'Financials', 
'Transportation', 'Retailing', 'Health Care', 'Telecommunications', 'Transportation', 'Technology', 'Industrials', 'Health Care', 'Financials', 'Food, Beverages & Tobacco', 'Transportation', 'Technology', 'Wholesalers', 'Financials', 'Retailing', 'Financials', 'Food, Beverages & Tobacco', 'Food & Drug Stores', 'Apparel', 'Energy', 'Energy', 'Energy', 'Financials', 'Food & Drug Stores', 'Energy', 'Food, Beverages & Tobacco', 'Industrials', 'Media', 'Aerospace & Defense', 'Financials', 'Financials', 'Industrials', 'Financials', 'Financials', 'Energy', 'Financials', 'Technology', 'Food, Beverages & Tobacco', 'Media', 'Health Care', 'Health Care', 'Financials', 'Wholesalers', 'Food, Beverages & Tobacco', 'Energy', 'Health Care', 'Food, Beverages & Tobacco', 'Aerospace & Defense', 'Aerospace & Defense', 'Retailing', 'Wholesalers', 'Financials', 'Retailing', 'Materials', 'Energy', 'Energy', 'Hotels, Restaurants & Leisure', 'Wholesalers', 'Health Care', 'Health Care', 'Hotels, Restaurants & Leisure', 'Hotels, Restaurants & Leisure', 'Technology', 'Retailing', 'Energy', 'Financials', 'Financials', 'Retailing', 'Retailing', 'Industrials', 'Transportation', 'Transportation', 'Business Services', 'Technology', 'Health Care', 'Energy', 'Health Care', 'Motor Vehicles & Parts', 'Industrials', 'Technology', 'Materials', 'Health Care', 'Engineering & Construction', 'Food, Beverages & Tobacco', 'Industrials', 'Financials', 'Retailing', 'Technology', 'Technology', 'Health Care', 'Business Services', 'Health Care', 'Household Products', 'Engineering & Construction', 'Financials', 'Telecommunications', 'Energy', 'Energy', 'Wholesalers', 'Health Care', 'Wholesalers', 'Retailing', 'Financials', 'Retailing', 'Financials', 'Energy', 'Wholesalers', 'Industrials', 'Health Care', 'Food & Drug Stores', 'Retailing', 'Food, Beverages & Tobacco', 'Retailing', 'Household Products', 'Energy', 'Transportation', 'Motor Vehicles & Parts', 'Business Services', 'Technology', 'Chemicals', 'Chemicals', 'Technology', 'Transportation', 'Materials', 'Technology', 'Household Products', 'Media', 'Health Care', 'Chemicals', 'Business Services', 'Technology', 'Business Services', 'Telecommunications', 'Industrials', 'Financials', 'Energy', 'Financials', 'Aerospace & Defense', 'Retailing', 'Financials', 'Engineering & Construction', 'Financials', 'Energy', 'Energy', 'Chemicals', 'Food, Beverages & Tobacco', 'Financials', 'Energy', 'Energy', 'Energy', 'Media', 'Business Services', 'Energy', 'Health Care', 'Aerospace & Defense', 'Food, Beverages & Tobacco', 'Hotels, Restaurants & Leisure', 'Household Products', 'Technology', 'Engineering & Construction', 'Retailing', 'Energy', 'Energy', 'Financials', 'Retailing', 'Business Services', 'Financials', 'Wholesalers', 'Financials', 'Health Care', 'Financials', 'Apparel', 'Business Services', 'Energy', 'Health Care', 'Materials', 'Wholesalers', 'Retailing', 'Energy', 'Financials', 'Health Care', 'Financials', 'Financials', 'Business Services', 'Energy', 'Industrials', 'Energy', 'Household Products', 'Financials', 'Motor Vehicles & Parts', 'Technology', 'Materials', 'Financials', 'Chemicals', 'Transportation', 'Energy', 'Financials', 'Health Care', 'Energy', 'Energy', 'Energy', 'Retailing', 'Retailing', 'Energy', 'Food, Beverages & Tobacco', 'Aerospace & Defense', 'Materials', 'Retailing', 'Retailing', 'Hotels, Restaurants & Leisure', 'Retailing', 'Chemicals', 'Health Care', 'Transportation', 'Technology', 'Health Care', 'Wholesalers', 'Retailing', 'Motor Vehicles & Parts', 'Media', 'Technology', 
'Technology', 'Industrials', 'Retailing', 'Retailing', 'Business Services', 'Engineering & Construction', 'Retailing', 'Financials', 'Wholesalers', 'Motor Vehicles & Parts', 'Financials', 'Financials', 'Health Care', 'Materials', 'Technology', 'Financials', 'Energy', 'Technology', 'Chemicals', 'Financials', 'Materials', 'Financials', 'Energy', 'Household Products', 'Engineering & Construction', 'Retailing', 'Wholesalers', 'Wholesalers', 'Motor Vehicles & Parts', 'Food, Beverages & Tobacco', 'Retailing', 'Food, Beverages & Tobacco', 'Hotels, Restaurants & Leisure', 'Telecommunications', 'Business Services', 'Energy', 'Health Care', 'Retailing', 'Financials', 'Wholesalers', 'Apparel', 'Retailing', 'Energy', 'Retailing', 'Energy', 'Financials', 'Materials', 'Engineering & Construction', 'Retailing', 'Engineering & Construction', 'Industrials', 'Financials', 'Energy', 'Chemicals', 'Wholesalers', 'Industrials', 'Financials', 'Wholesalers', 'Media', 'Wholesalers', 'Chemicals', 'Energy', 'Technology', 'Transportation', 'Financials', 'Wholesalers', 'Food, Beverages & Tobacco', 'Business Services', 'Industrials', 'Health Care', 'Food, Beverages & Tobacco', 'Retailing', 'Energy', 'Business Services', 'Financials', 'Health Care', 'Engineering & Construction', 'Financials', 'Wholesalers', 'Household Products', 'Energy', 'Household Products', 'Technology', 'Financials', 'Financials', 'Media', 'Financials', 'Food, Beverages & Tobacco', 'Energy', 'Aerospace & Defense', 'Chemicals', 'Food, Beverages & Tobacco', 'Energy', 'Energy', 'Food, Beverages & Tobacco', 'Transportation', 'Energy', 'Technology', 'Health Care', 'Retailing', 'Motor Vehicles & Parts', 'Motor Vehicles & Parts', 'Materials', 'Transportation', 'Hotels, Restaurants & Leisure', 'Hotels, Restaurants & Leisure', 'Financials', 'Materials', 'Materials', 'Technology', 'Transportation', 'Technology', 'Materials', 'Aerospace & Defense', 'Media', 'Aerospace & Defense', 'Transportation', 'Media', 'Materials', 'Technology', 'Financials', 'Financials', 'Industrials', 'Aerospace & Defense', 'Health Care', 'Technology', 'Food, Beverages & Tobacco', 'Financials', 'Industrials', 'Apparel', 'Household Products', 'Retailing', 'Business Services', 'Retailing', 'Wholesalers', 'Materials', 'Engineering & Construction', 'Energy', 'Wholesalers', 'Financials', 'Technology', 'Apparel', 'Retailing', 'Financials', 'Materials', 'Financials', 'Energy', 'Retailing', 'Financials', 'Financials', 'Materials', 'Technology', 'Engineering & Construction', 'Industrials', 'Food, Beverages & Tobacco', 'Hotels, Restaurants & Leisure', 'Chemicals', 'Motor Vehicles & Parts', 'Financials', 'Chemicals', 'Media', 'Energy', 'Financials', 'Chemicals', 'Materials', 'Energy', 'Financials', 'Retailing', 'Financials', 'Materials', 'Financials', 'Business Services', 'Financials', 'Retailing', 'Business Services', 'Financials', 'Household Products', 'Financials', 'Financials', 'Retailing', 'Hotels, Restaurants & Leisure', 'Health Care', 'Telecommunications', 'Health Care', 'Financials', 'Financials', 'Food, Beverages & Tobacco', 'Hotels, Restaurants & Leisure', 'Engineering & Construction', 'Food, Beverages & Tobacco', 'Technology', 'Financials', 'Financials', 'Household Products', 'Financials', 'Business Services', 'Transportation', 'Energy', 'Wholesalers', 'Energy', 'Technology', 'Financials', 'Business Services', 'Technology', 'Transportation', 'Energy', 'Business Services', 'Energy', 'Business Services', 'Energy', 'Financials', 'Business Services', 'Health Care', 'Retailing', 
'Technology', 'Health Care', 'Retailing', 'Household Products', 'Retailing', 'Business Services', 'Food, Beverages & Tobacco', 'Household Products', 'Financials', 'Financials', 'Technology', 'Materials', 'Energy', 'Health Care', 'Technology', 'Telecommunications', 'Wholesalers', 'Materials', 'Technology', 'Materials', 'Technology', 'Apparel', 'Chemicals', 'Business Services', 'Apparel', 'Transportation', 'Transportation', 'Household Products', 'Financials', 'Energy', 'Hotels, Restaurants & Leisure', 'Technology', 'Energy', 'Food, Beverages & Tobacco', 'Financials', 'Aerospace & Defense', 'Engineering & Construction', 'Health Care', 'Financials', 'Technology', 'Retailing', 'Industrials', 'Retailing', 'Hotels, Restaurants & Leisure', 'Food & Drug Stores', 'Technology', 'Technology', 'Food & Drug Stores', 'Technology', 'Apparel', 'Health Care', 'Hotels, Restaurants & Leisure', 'Technology', 'Technology', 'Energy', 'Health Care', 'Wholesalers', 'Financials', 'Energy', 'Materials', 'Wholesalers', 'Transportation', 'Business Services', 'Wholesalers', 'Engineering & Construction', 'Financials', 'Industrials', 'Financials', 'Technology', 'Wholesalers', 'Food, Beverages & Tobacco', 'Financials', 'Business Services', 'Technology', 'Financials', 'Retailing', 'Financials', 'Energy', 'Hotels, Restaurants & Leisure', 'Business Services', 'Technology', 'Technology', 'Apparel', 'Engineering & Construction', 'Retailing', 'Financials', 'Business Services', 'Media', 'Chemicals', 'Materials', 'Materials', 'Materials', 'Technology', 'Transportation', 'Business Services', 'Food & Drug Stores', 'Industrials', 'Health Care', 'Energy', 'Business Services', 'Health Care', 'Materials', 'Financials', 'Household Products', 'Materials', 'Retailing', 'Food, Beverages & Tobacco', 'Retailing', 'Financials', 'Materials', 'Engineering & Construction', 'Industrials', 'Energy', 'Business Services', 'Industrials', 'Industrials', 'Retailing', 'Household Products', 'Chemicals', 'Materials', 'Financials', 'Industrials', 'Industrials', 'Food, Beverages & Tobacco', 'Financials', 'Financials', 'Industrials', 'Transportation', 'Food, Beverages & Tobacco', 'Industrials', 'Energy', 'Technology', 'Transportation', 'Energy', 'Energy', 'Financials', 'Materials', 'Wholesalers', 'Motor Vehicles & Parts', 'Retailing', 'Media', 'Technology', 'Energy', 'Chemicals', 'Business Services', 'Media', 'Financials', 'Wholesalers', 'Energy', 'Media', 'Health Care', 'Technology', 'Financials', 'Financials', 'Financials', 'Aerospace & Defense', 'Aerospace & Defense', 'Materials', 'Materials', 'Industrials', 'Retailing', 'Technology', 'Food & Drug Stores', 'Energy', 'Health Care', 'Wholesalers', 'Chemicals', 'Chemicals', 'Health Care', 'Technology', 'Transportation', 'Financials', 'Financials', 'Apparel', 'Chemicals', 'Retailing', 'Energy', 'Energy', 'Food, Beverages & Tobacco', 'Wholesalers', 'Materials', 'Business Services', 'Industrials', 'Transportation', 'Energy', 'Financials', 'Business Services', 'Business Services', 'Financials', 'Food, Beverages & Tobacco', 'Materials', 'Technology', 'Technology', 'Retailing', 'Motor Vehicles & Parts', 'Business Services', 'Financials', 'Business Services', 'Technology', 'Chemicals', 'Engineering & Construction', 'Transportation', 'Materials', 'Energy', 'Technology', 'Media', 'Financials', 'Business Services', 'Hotels, Restaurants & Leisure', 'Hotels, Restaurants & Leisure', 'Media', 'Motor Vehicles & Parts', 'Food, Beverages & Tobacco', 'Health Care', 'Energy', 'Business Services', 'Hotels, Restaurants & 
Leisure', 'Chemicals', 'Energy', 'Health Care', 'Financials', 'Technology', 'Household Products', 'Media', 'Materials', 'Industrials', 'Industrials', 'Retailing', 'Financials', 'Food, Beverages & Tobacco', 'Media', 'Engineering & Construction', 'Engineering & Construction', 'Business Services', 'Energy', 'Chemicals', 'Hotels, Restaurants & Leisure', 'Health Care', 'Financials', 'Business Services', 'Retailing', 'Retailing', 'Financials', 'Wholesalers', 'Financials', 'Industrials', 'Financials', 'Technology', 'Health Care', 'Health Care', 'Technology', 'Materials', 'Wholesalers', 'Motor Vehicles & Parts', 'Food, Beverages & Tobacco', 'Financials', 'Financials', 'Transportation', 'Technology', 'Engineering & Construction', 'Financials', 'Apparel', 'Media', 'Energy', 'Technology', 'Retailing', 'Business Services', 'Financials', 'Wholesalers', 'Apparel', 'Hotels, Restaurants & Leisure', 'Industrials', 'Retailing', 'Household Products', 'Engineering & Construction', 'Technology', 'Materials', 'Health Care', 'Technology', 'Financials', 'Media', 'Materials', 'Technology', 'Technology', 'Financials', 'Chemicals', 'Energy', 'Retailing', 'Transportation', 'Household Products', 'Health Care', 'Financials', 'Technology', 'Energy', 'Technology', 'Wholesalers', 'Transportation', 'Business Services', 'Industrials', 'Financials', 'Energy', 'Aerospace & Defense', 'Technology', 'Financials', 'Wholesalers', 'Business Services', 'Financials', 'Energy', 'Industrials', 'Transportation', 'Engineering & Construction', 'Energy', 'Hotels, Restaurants & Leisure', 'Energy', 'Media', 'Energy', 'Household Products', 'Retailing', 'Technology', 'Technology', 'Business Services', 'Energy', 'Industrials', 'Technology', 'Aerospace & Defense', 'Health Care', 'Financials', 'Financials', 'Materials', 'Technology', 'Apparel', 'Chemicals', 'Materials', 'Financials', 'Food & Drug Stores', 'Financials', 'Engineering & Construction', 'Technology', 'Technology', 'Energy', 'Health Care', 'Retailing', 'Media', 'Transportation', 'Technology', 'Industrials', 'Hotels, Restaurants & Leisure', 'Engineering & Construction', 'Industrials', 'Industrials', 'Retailing', 'Retailing', 'Industrials', 'Retailing', 'Energy', 'Energy', 'Apparel', 'Technology', 'Financials', 'Industrials', 'Chemicals', 'Energy', 'Financials', 'Technology', 'Energy', 'Wholesalers', 'Technology', 'Chemicals', 'Financials', 'Technology', 'Household Products', 'Technology', 'Financials', 'Industrials', 'Business Services', 'Retailing', 'Household Products', 'Household Products', 'Financials', 'Aerospace & Defense', 'Energy', 'Motor Vehicles & Parts', 'Industrials', 'Business Services', 'Motor Vehicles & Parts', 'Energy', 'Hotels, Restaurants & Leisure', 'Technology', 'Household Products', 'Business Services', 'Business Services', 'Financials', 'Technology', 'Business Services', 'Financials', 'Food, Beverages & Tobacco', 'Energy', 'Technology', 'Hotels, Restaurants & Leisure', 'Transportation', 'Business Services', 'Transportation', 'Telecommunications', 'Chemicals', 'Retailing', 'Wholesalers', 'Financials', 'Household Products', 'Financials', 'Transportation', 'Health Care', 'Financials', 'Energy', 'Transportation', 'Technology', 'Motor Vehicles & Parts', 'Health Care', 'Business Services', 'Financials', 'Technology', 'Health Care', 'Retailing', 'Technology', 'Transportation', 'Financials', 'Aerospace & Defense', 'Technology', 'Chemicals', 'Transportation', 'Energy', 'Health Care', 'Health Care', 'Food, Beverages & Tobacco', 'Industrials', 'Health Care', 'Motor Vehicles & 
Parts', 'Food & Drug Stores', 'Wholesalers', 'Industrials', 'Technology', 'Materials', 'Household Products', 'Transportation', 'Technology', 'Energy', 'Financials', 'Industrials', 'Energy', 'Energy', 'Aerospace & Defense', 'Aerospace & Defense', 'Health Care', 'Materials', 'Chemicals', 'Aerospace & Defense', 'Health Care', 'Media', 'Engineering & Construction', 'Chemicals', 'Financials', 'Hotels, Restaurants & Leisure', 'Industrials', 'Household Products', 'Transportation', 'Technology', 'Technology', 'Retailing', 'Business Services', 'Technology', 'Materials', 'Aerospace & Defense', 'Energy', 'Financials', 'Chemicals', 'Energy', 'Energy', 'Industrials', 'Engineering & Construction', 'Technology', 'Hotels, Restaurants & Leisure', 'Financials', 'Engineering & Construction', 'Financials', 'Aerospace & Defense', 'Retailing', 'Energy', 'Technology', 'Retailing', 'Media', 'Health Care', 'Wholesalers', 'Health Care', 'Business Services', 'Health Care', 'Financials'], 'Industry': ['General Merchandisers', 'Petroleum Refining', 'Insurance: Property and Casualty (Stock)', 'Computers, Office Equipment', 'Health Care: Insurance and Managed Care', 'Wholesalers: Health Care', 'Health Care: Pharmacy and Other Services', 'Internet Services and Retailing', 'Telecommunications', 'Motor Vehicles and Parts', 'Motor Vehicles and Parts', 'Wholesalers: Health Care', 'Petroleum Refining', 'Wholesalers: Health Care', 'General Merchandisers', 'Telecommunications', 'Food and Drug Stores', 'Industrial Machinery', 'Food and Drug Stores', 'Commercial Banks', 'Diversified Financials', 'Internet Services and Retailing', 'Specialty Retailers: Other', 'Commercial Banks', 'Health Care: Pharmacy and Other Services', 'Commercial Banks', 'Aerospace and Defense', 'Petroleum Refining', 'Health Care: Insurance and Managed Care', 'Computer Software', 'Petroleum Refining', 'Commercial Banks', 'Telecommunications', 'Information Technology Services', 'Computers, Office Equipment', 'Insurance: Property and Casualty (Mutual)', 'Pharmaceuticals', 'Diversified Financials', 'General Merchandisers', 'Specialty Retailers: Other', 'Petroleum Refining', 'Household and Personal Products', 'Insurance: Life, Health (stock)', 'Mail, Package, and Freight Delivery', 'Food Consumer Products', 'Semiconductors and Other Electronic Components', 'Chemicals', 'Food Production', 'Health Care: Insurance and Managed Care', 'Mail, Package, and Freight Delivery', 'Aerospace and Defense', 'Insurance: Life, Health (stock)', 'Food and Drug Stores', 'Wholesalers: Food and Grocery', 'Entertainment', 'Health Care: Insurance and Managed Care', 'Pharmaceuticals', 'Computers, Office Equipment', 'Aerospace and Defense', 'Insurance: Property and Casualty (Stock)', 'Health Care: Insurance and Managed Care', 'Network and Other Communications Equipment', 'Health Care: Medical Facilities', 'Pipelines', 'Construction and Farm Machinery', 'Insurance: Property and Casualty (Mutual)', 'Commercial Banks', 'Insurance: Property and Casualty (Stock)', 'Insurance: Life, Health (Mutual)', 'Commercial Banks', 'Airlines', 'Specialty Retailers: Other', 'Health Care: Insurance and Managed Care', 'Telecommunications', 'Airlines', 'Internet Services and Retailing', 'Electronics, Electrical Equip.', 'Pharmaceuticals', 'Insurance: Property and Casualty (Stock)', 'Food Production', 'Airlines', 'Computer Software', 'Wholesalers: Electronics and Office Equipment', 'Insurance: Life, Health (Mutual)', 'Specialty Retailers: Apparel', 'Diversified Financials', 'Beverages', 'Food and Drug Stores', 
'Apparel', 'Petroleum Refining', 'Energy', 'Utilities: Gas and Electric', 'Insurance: Life, Health (Mutual)', 'Food and Drug Stores', 'Mining, Crude-Oil Production', 'Food Production', 'Miscellaneous', 'Entertainment', 'Aerospace and Defense', 'Insurance: Property and Casualty (Stock)', 'Commercial Banks', 'Construction and Farm Machinery', 'Diversified Financials', 'Insurance: Life, Health (Mutual)', 'Pipelines', 'Insurance: Property and Casualty (Stock)', 'Computers, Office Equipment', 'Tobacco', 'Entertainment', 'Pharmaceuticals', 'Medical Products and Equipment', 'Insurance: Property and Casualty (Stock)', 'Wholesalers: Electronics and Office Equipment', 'Food Consumer Products', 'Pipelines', 'Pharmaceuticals', 'Food Consumer Products', 'Aerospace and Defense', 'Aerospace and Defense', 'General Merchandisers', 'Wholesalers: Food and Grocery', 'Commercial Banks', 'Specialty Retailers: Other', 'Packaging, Containers', 'Utilities: Gas and Electric', 'Utilities: Gas and Electric', 'Hotels, Casinos, Resorts', 'Wholesalers: Electronics and Office Equipment', 'Pharmaceuticals', 'Pharmaceuticals', 'Food Services', 'Food Services', 'Semiconductors and Other Electronic Components', 'Specialty Retailers: Other', 'Petroleum Refining', 'Diversified Financials', 'Insurance: Life, Health (stock)', 'Automotive Retailing, Services', 'Automotive Retailing, Services', 'Electronics, Electrical Equip.', 'Railroads', 'Airlines', 'Temporary Help', 'Scientific,Photographic and Control Equipment', 'Pharmaceuticals', 'Oil and Gas Equipment, Services', 'Health Care: Medical Facilities', 'Motor Vehicles and Parts', 'Industrial Machinery', 'Semiconductors and Other Electronic Components', 'Metals', 'Health Care: Insurance and Managed Care', 'Engineering, Construction', 'Tobacco', 'Construction and Farm Machinery', 'Insurance: Property and Casualty (Stock)', 'General Merchandisers', 'Computers, Office Equipment', 'Semiconductors and Other Electronic Components', 'Health Care: Medical Facilities', 'Financial Data Services', 'Medical Products and Equipment', 'Household and Personal Products', 'Engineering, Construction', 'Commercial Banks', 'Telecommunications', 'Utilities: Gas and Electric', 'Utilities: Gas and Electric', 'Wholesalers: Electronics and Office Equipment', 'Health Care: Insurance and Managed Care', 'Wholesalers: Food and Grocery', 'General Merchandisers', 'Diversified Financials', 'Automotive Retailing, Services', 'Commercial Banks', 'Mining, Crude-Oil Production', 'Wholesalers: Diversified', 'Industrial Machinery', 'Health Care: Medical Facilities', 'Food and Drug Stores', 'Specialty Retailers: Apparel', 'Food Consumer Products', 'General Merchandisers', 'Household and Personal Products', 'Utilities: Gas and Electric', 'Transportation and Logistics', 'Motor Vehicles and Parts', 'Advertising, marketing', 'Information Technology Services', 'Chemicals', 'Chemicals', 'Semiconductors and Other Electronic Components', 'Transportation and Logistics', 'Packaging, Containers', 'Information Technology Services', 'Home Equipment, Furnishings', 'Entertainment', 'Health Care: Pharmacy and Other Services', 'Chemicals', 'Diversified Outsourcing Services', 'Semiconductors and Other Electronic Components', 'Waste Management', 'Telecommunications', 'Industrial Machinery', 'Insurance: Life, Health (stock)', 'Petroleum Refining', 'Real estate', 'Aerospace and Defense', 'Specialty Retailers: Apparel', 'Insurance: Life, Health (stock)', 'Homebuilders', 'Diversified Financials', 'Mining, Crude-Oil Production', 'Utilities: 
Gas and Electric', 'Chemicals', 'Food Consumer Products', 'Insurance: Property and Casualty (Stock)', 'Pipelines', 'Utilities: Gas and Electric', 'Mining, Crude-Oil Production', 'Entertainment', 'Financial Data Services', 'Energy', 'Pharmaceuticals', 'Aerospace and Defense', 'Food Consumer Products', 'Hotels, Casinos, Resorts', 'Home Equipment, Furnishings', 'Internet Services and Retailing', 'Homebuilders', 'Specialty Retailers: Apparel', 'Utilities: Gas and Electric', 'Utilities: Gas and Electric', 'Insurance: Life, Health (stock)', 'General Merchandisers', 'Financial Data Services', 'Securities', 'Wholesalers: Health Care', 'Insurance: Life, Health (Mutual)', 'Medical Products and Equipment', 'Diversified Financials', 'Apparel', 'Diversified Outsourcing Services', 'Utilities: Gas and Electric', 'Pharmaceuticals', 'Metals', 'Wholesalers: Food and Grocery', 'Specialty Retailers: Other', 'Pipelines', 'Commercial Banks', 'Medical Products and Equipment', 'Diversified Financials', 'Insurance: Property and Casualty (Mutual)', 'Financial Data Services', 'Utilities: Gas and Electric', 'Industrial Machinery', 'Mining, Crude-Oil Production', 'Household and Personal Products', 'Commercial Banks', 'Motor Vehicles and Parts', 'Internet Services and Retailing', 'Metals', 'Commercial Banks', 'Chemicals', 'Railroads', 'Utilities: Gas and Electric', 'Insurance: Life, Health (stock)', 'Health Care: Medical Facilities', 'Energy', 'Mining, Crude-Oil Production', 'Utilities: Gas and Electric', 'Specialty Retailers: Other', 'Automotive Retailing, Services', 'Utilities: Gas and Electric', 'Beverages', 'Aerospace and Defense', 'Packaging, Containers', 'Specialty Retailers: Other', 'Specialty Retailers: Other', 'Hotels, Casinos, Resorts', 'Specialty Retailers: Other', 'Chemicals', 'Medical Products and Equipment', 'Railroads', 'Computer Software', 'Health Care: Pharmacy and Other Services', 'Wholesalers: Diversified', 'Internet Services and Retailing', 'Motor Vehicles and Parts', 'Entertainment', 'Computers, Office Equipment', 'Information Technology Services', 'Electronics, Electrical Equip.', 'Automotive Retailing, Services', 'Internet Services and Retailing', 'Waste Management', 'Engineering, Construction', 'Automotive Retailing, Services', 'Diversified Financials', 'Wholesalers: Diversified', 'Motor Vehicles and Parts', 'Insurance: Property and Casualty (Stock)', 'Commercial Banks', 'Health Care: Pharmacy and Other Services', 'Metals', 'Semiconductors and Other Electronic Components', 'Diversified Financials', 'Utilities: Gas and Electric', 'Internet Services and Retailing', 'Chemicals', 'Insurance: Property and Casualty (Stock)', 'Metals', 'Insurance: Life, Health (stock)', 'Mining, Crude-Oil Production', 'Home Equipment, Furnishings', 'Engineering, Construction', 'Specialty Retailers: Other', 'Wholesalers: Health Care', 'Wholesalers: Food and Grocery', 'Motor Vehicles and Parts', 'Food Consumer Products', 'Specialty Retailers: Other', 'Food Consumer Products', 'Hotels, Casinos, Resorts', 'Telecommunications', 'Financial Data Services', 'Utilities: Gas and Electric', 'Medical Products and Equipment', 'Specialty Retailers: Other', 'Securities', 'Wholesalers: Diversified', 'Apparel', 'Automotive Retailing, Services', 'Pipelines', 'Automotive Retailing, Services', 'Energy', 'Insurance: Life, Health (stock)', 'Packaging, Containers', 'Engineering, Construction', 'Specialty Retailers: Other', 'Homebuilders', 'Construction and Farm Machinery', 'Insurance: Life, Health (Mutual)', 'Pipelines', 'Chemicals', 
'Wholesalers: Diversified', 'Construction and Farm Machinery', 'Insurance: Life, Health (stock)', 'Wholesalers: Diversified', 'Publishing, Printing', 'Wholesalers: Food and Grocery', 'Chemicals', 'Energy', 'Semiconductors and Other Electronic Components', 'Airlines', 'Real estate', 'Wholesalers: Electronics and Office Equipment', 'Food Consumer Products', 'Advertising, marketing', 'Industrial Machinery', 'Medical Products and Equipment', 'Food Consumer Products', 'Specialty Retailers: Apparel', 'Utilities: Gas and Electric', 'Financial Data Services', 'Commercial Banks', 'Health Care: Pharmacy and Other Services', 'Engineering, Construction', 'Insurance: Property and Casualty (Stock)', 'Wholesalers: Diversified', 'Household and Personal Products', 'Utilities: Gas and Electric', 'Home Equipment, Furnishings', 'Information Technology Services', 'Insurance: Property and Casualty (Mutual)', 'Securities', 'Entertainment', 'Insurance: Property and Casualty (Mutual)', 'Food Consumer Products', 'Utilities: Gas and Electric', 'Aerospace and Defense', 'Chemicals', 'Food Consumer Products', 'Petroleum Refining', 'Mining, Crude-Oil Production', 'Beverages', 'Trucking, Truck Leasing', 'Oil and Gas Equipment, Services', 'Computer Software', 'Health Care: Medical Facilities', 'Specialty Retailers: Other', 'Motor Vehicles and Parts', 'Motor Vehicles and Parts', 'Forest and Paper Products', 'Trucking, Truck Leasing', 'Food Services', 'Food Services', 'Diversified Financials', 'Packaging, Containers', 'Building Materials, Glass', 'Entertainment', 'Airlines', 'Network and Other Communications Equipment', 'Miscellaneous', 'Aerospace and Defense', 'Publishing, Printing', 'Aerospace and Defense', 'Transportation and Logistics', 'Entertainment', 'Packaging, Containers', 'Semiconductors and Other Electronic Components', 'Commercial Banks', 'Insurance: Property and Casualty (Stock)', 'Construction and Farm Machinery', 'Aerospace and Defense', 'Health Care: Medical Facilities', 'Information Technology Services', 'Beverages', 'Real estate', 'Industrial Machinery', 'Apparel', 'Household and Personal Products', 'Specialty Retailers: Apparel', 'Miscellaneous', 'Specialty Retailers: Other', 'Wholesalers: Diversified', 'Packaging, Containers', 'Engineering, Construction', 'Utilities: Gas and Electric', 'Wholesalers: Diversified', 'Securities', 'Computers, Office Equipment', 'Apparel', 'Automotive Retailing, Services', 'Commercial Banks', 'Packaging, Containers', 'Insurance: Property and Casualty (Stock)', 'Mining, Crude-Oil Production', 'General Merchandisers', 'Insurance: Property and Casualty (Stock)', 'Securities', 'Building Materials, Glass', 'Network and Other Communications Equipment', 'Homebuilders', 'Electronics, Electrical Equip.', 'Food Consumer Products', 'Hotels, Casinos, Resorts', 'Chemicals', 'Motor Vehicles and Parts', 'Insurance: Property and Casualty (Stock)', 'Chemicals', 'Entertainment', 'Utilities: Gas and Electric', 'Diversified Financials', 'Chemicals', 'Packaging, Containers', 'Energy', 'Real estate', 'Specialty Retailers: Apparel', 'Commercial Banks', 'Metals', 'Insurance: Life, Health (stock)', 'Financial Data Services', 'Insurance: Property and Casualty (Stock)', 'Specialty Retailers: Other', 'Diversified Outsourcing Services', 'Commercial Banks', 'Household and Personal Products', 'Insurance: Property and Casualty (Stock)', 'Securities', 'Specialty Retailers: Other', 'Food Services', 'Pharmaceuticals', 'Telecommunications', 'Health Care: Insurance and Managed Care', 'Insurance: Life, Health 
(Mutual)', 'Securities', 'Food Production', 'Hotels, Casinos, Resorts', 'Homebuilders', 'Food Production', 'Information Technology Services', 'Insurance: Property and Casualty (Stock)', 'Insurance: Property and Casualty (Stock)', 'Household and Personal Products', 'Commercial Banks', 'Financial Data Services', 'Transportation Equipment', 'Energy', 'Wholesalers: Health Care', 'Mining, Crude-Oil Production', 'Semiconductors and Other Electronic Components', 'Real estate', 'Financial Data Services', 'Computers, Office Equipment', 'Transportation Equipment', 'Mining, Crude-Oil Production', 'Diversified Outsourcing Services', 'Energy', 'Diversified Outsourcing Services', 'Mining, Crude-Oil Production', 'Real estate', 'Temporary Help', 'Health Care: Medical Facilities', 'Specialty Retailers: Other', 'Semiconductors and Other Electronic Components', 'Pharmaceuticals', 'Specialty Retailers: Other', 'Home Equipment, Furnishings', 'Specialty Retailers: Other', 'Temporary Help', 'Food Consumer Products', 'Toys, Sporting Goods', 'Insurance: Property and Casualty (Stock)', 'Diversified Financials', 'Computer Software', 'Forest and Paper Products', 'Mining, Crude-Oil Production', 'Health Care: Pharmacy and Other Services', 'Semiconductors and Other Electronic Components', 'Telecommunications', 'Wholesalers: Electronics and Office Equipment', 'Packaging, Containers', 'Network and Other Communications Equipment', 'Metals', 'Information Technology Services', 'Apparel', 'Chemicals', 'Financial Data Services', 'Apparel', 'Transportation Equipment', 'Trucking, Truck Leasing', 'Toys, Sporting Goods', 'Insurance: Property and Casualty (Stock)', 'Utilities: Gas and Electric', 'Hotels, Casinos, Resorts', 'Entertainment', 'Energy', 'Food Consumer Products', 'Securities', 'Aerospace and Defense', 'Engineering, Construction', 'Health Care: Medical Facilities', 'Commercial Banks', 'Internet Services and Retailing', 'Automotive Retailing, Services', 'Industrial Machinery', 'Specialty Retailers: Apparel', 'Hotels, Casinos, Resorts', 'Food and Drug Stores', 'Computers, Office Equipment', 'Scientific,Photographic and Control Equipment', 'Food and Drug Stores', 'Network and Other Communications Equipment', 'Apparel', 'Health Care: Pharmacy and Other Services', 'Food Services', 'Scientific,Photographic and Control Equipment', 'Information Technology Services', 'Energy', 'Health Care: Medical Facilities', 'Wholesalers: Diversified', 'Insurance: Property and Casualty (Stock)', 'Utilities: Gas and Electric', 'Packaging, Containers', 'Wholesalers: Diversified', 'Trucking, Truck Leasing', 'Education', 'Wholesalers: Diversified', 'Homebuilders', 'Real estate', 'Construction and Farm Machinery', 'Real estate', 'Information Technology Services', 'Wholesalers: Diversified', 'Beverages', 'Real estate', 'Diversified Outsourcing Services', 'Scientific,Photographic and Control Equipment', 'Insurance: Life, Health (stock)', 'Automotive Retailing, Services', 'Securities', 'Mining, Crude-Oil Production', 'Food Services', 'Financial Data Services', 'Computer Software', 'Semiconductors and Other Electronic Components', 'Apparel', 'Engineering, Construction', 'Specialty Retailers: Other', 'Insurance: Life, Health (stock)', 'Financial Data Services', 'Publishing, Printing', 'Chemicals', 'Building Materials, Glass', 'Packaging, Containers', 'Packaging, Containers', 'Computer Software', 'Transportation and Logistics', 'Financial Data Services', 'Food and Drug Stores', 'Industrial Machinery', 'Medical Products and Equipment', 'Petroleum 
Refining', 'Financial Data Services', 'Health Care: Medical Facilities', 'Building Materials, Glass', 'Securities', 'Home Equipment, Furnishings', 'Building Materials, Glass', 'Specialty Retailers: Other', 'Food Consumer Products', 'Specialty Retailers: Other', 'Insurance: Life, Health (stock)', 'Building Materials, Glass', 'Homebuilders', 'Industrial Machinery', 'Energy', 'Diversified Outsourcing Services', 'Industrial Machinery', 'Electronics, Electrical Equip.', 'Specialty Retailers: Apparel', 'Household and Personal Products', 'Chemicals', 'Building Materials, Glass', 'Diversified Financials', 'Industrial Machinery', 'Electronics, Electrical Equip.', 'Food Production', 'Securities', 'Securities', 'Electronics, Electrical Equip.', 'Transportation Equipment', 'Food Production', 'Industrial Machinery', 'Mining, Crude-Oil Production', 'Semiconductors and Other Electronic Components', 'Trucking, Truck Leasing', 'Pipelines', 'Oil and Gas Equipment, Services', 'Securities', 'Packaging, Containers', 'Wholesalers: Diversified', 'Motor Vehicles and Parts', 'Specialty Retailers: Apparel', 'Publishing, Printing', 'Internet Services and Retailing', 'Energy', 'Chemicals', 'Waste Management', 'Entertainment', 'Real estate', 'Wholesalers: Electronics and Office Equipment', 'Utilities: Gas and Electric', 'Entertainment', 'Pharmaceuticals', 'Computers, Office Equipment', 'Commercial Banks', 'Insurance: Property and Casualty (Mutual)', 'Insurance: Life, Health (stock)', 'Aerospace and Defense', 'Aerospace and Defense', 'Metals', 'Forest and Paper Products', 'Electronics, Electrical Equip.', 'Specialty Retailers: Apparel', 'Semiconductors and Other Electronic Components', 'Food and Drug Stores', 'Utilities: Gas and Electric', 'Health Care: Pharmacy and Other Services', 'Wholesalers: Diversified', 'Chemicals', 'Chemicals', 'Medical Products and Equipment', 'Semiconductors and Other Electronic Components', 'Trucking, Truck Leasing', 'Insurance: Property and Casualty (Stock)', 'Insurance: Life, Health (stock)', 'Apparel', 'Chemicals', 'Specialty Retailers: Other', 'Utilities: Gas and Electric', 'Energy', 'Beverages', 'Wholesalers: Diversified', 'Metals', 'Financial Data Services', 'Electronics, Electrical Equip.', 'Trucking, Truck Leasing', 'Miscellaneous', 'Securities', 'Diversified Outsourcing Services', 'Diversified Outsourcing Services', 'Insurance: Property and Casualty (Mutual)', 'Food Production', 'Packaging, Containers', 'Information Technology Services', 'Internet Services and Retailing', 'Specialty Retailers: Apparel', 'Motor Vehicles and Parts', 'Diversified Outsourcing Services', 'Commercial Banks', 'Diversified Outsourcing Services', 'Information Technology Services', 'Chemicals', 'Homebuilders', 'Airlines', 'Building Materials, Glass', 'Mining, Crude-Oil Production', 'Scientific,Photographic and Control Equipment', 'Entertainment', 'Insurance: Life, Health (Mutual)', 'Diversified Outsourcing Services', 'Food Services', 'Hotels, Casinos, Resorts', 'Publishing, Printing', 'Motor Vehicles and Parts', 'Food Consumer Products', 'Medical Products and Equipment', 'Mining, Crude-Oil Production', 'Miscellaneous', 'Hotels, Casinos, Resorts', 'Chemicals', 'Utilities: Gas and Electric', 'Medical Products and Equipment', 'Diversified Financials', 'Semiconductors and Other Electronic Components', 'Home Equipment, Furnishings', 'Entertainment', 'Metals', 'Industrial Machinery', 'Electronics, Electrical Equip.', 'General Merchandisers', 'Securities', 'Beverages', 'Entertainment', 'Engineering, Construction', 
'Engineering, Construction', 'Waste Management', 'Energy', 'Chemicals', 'Food Services', 'Health Care: Insurance and Managed Care', 'Commercial Banks', 'Diversified Outsourcing Services', 'Specialty Retailers: Other', 'Specialty Retailers: Apparel', 'Insurance: Life, Health (Mutual)', 'Wholesalers: Diversified', 'Securities', 'Industrial Machinery', 'Securities', 'Computer Software', 'Health Care: Medical Facilities', 'Medical Products and Equipment', 'Internet Services and Retailing', 'Metals', 'Wholesalers: Diversified', 'Motor Vehicles and Parts', 'Food Consumer Products', 'Insurance: Life, Health (stock)', 'Real estate', 'Trucking, Truck Leasing', 'Information Technology Services', 'Homebuilders', 'Diversified Financials', 'Apparel', 'Entertainment', 'Pipelines', 'Network and Other Communications Equipment', 'Specialty Retailers: Apparel', 'Diversified Outsourcing Services', 'Real estate', 'Wholesalers: Diversified', 'Apparel', 'Food Services', 'Industrial Machinery', 'Specialty Retailers: Apparel', 'Home Equipment, Furnishings', 'Engineering, Construction', 'Scientific,Photographic and Control Equipment', 'Metals', 'Medical Products and Equipment', 'Information Technology Services', 'Commercial Banks', 'Entertainment', 'Building Materials, Glass', 'Scientific,Photographic and Control Equipment', 'Computer Software', 'Insurance: Property and Casualty (Stock)', 'Chemicals', 'Utilities: Gas and Electric', 'Specialty Retailers: Other', 'Airlines', 'Household and Personal Products', 'Health Care: Pharmacy and Other Services', 'Real estate', 'Semiconductors and Other Electronic Components', 'Utilities: Gas and Electric', 'Scientific,Photographic and Control Equipment', 'Wholesalers: Diversified', 'Airlines', 'Temporary Help', 'Industrial Machinery', 'Real estate', 'Mining, Crude-Oil Production', 'Aerospace and Defense', 'Semiconductors and Other Electronic Components', 'Real estate', 'Wholesalers: Diversified', 'Education', 'Insurance: Property and Casualty (Mutual)', 'Mining, Crude-Oil Production', 'Industrial Machinery', 'Railroads', 'Homebuilders', 'Utilities: Gas and Electric', 'Hotels, Casinos, Resorts', 'Utilities: Gas and Electric', 'Entertainment', 'Utilities: Gas and Electric', 'Miscellaneous', 'General Merchandisers', 'Computers, Office Equipment', 'Semiconductors and Other Electronic Components', 'Temporary Help', 'Pipelines', 'Construction and Farm Machinery', 'Internet Services and Retailing', 'Aerospace and Defense', 'Pharmaceuticals', 'Real estate', 'Insurance: Property and Casualty (Stock)', 'Packaging, Containers', 'Semiconductors and Other Electronic Components', 'Apparel', 'Chemicals', 'Forest and Paper Products', 'Real estate', 'Food and Drug Stores', 'Securities', 'Homebuilders', 'Information Technology Services', 'Internet Services and Retailing', 'Petroleum Refining', 'Health Care: Pharmacy and Other Services', 'Specialty Retailers: Other', 'Entertainment', 'Trucking, Truck Leasing', 'Computer Software', 'Electronics, Electrical Equip.', 'Hotels, Casinos, Resorts', 'Engineering, Construction', 'Industrial Machinery', 'Industrial Machinery', 'Specialty Retailers: Other', 'Specialty Retailers: Apparel', 'Electronics, Electrical Equip.', 'Specialty Retailers: Apparel', 'Oil and Gas Equipment, Services', 'Energy', 'Apparel', 'Semiconductors and Other Electronic Components', 'Real estate', 'Industrial Machinery', 'Chemicals', 'Mining, Crude-Oil Production', 'Real estate', 'Semiconductors and Other Electronic Components', 'Mining, Crude-Oil Production', 'Wholesalers: 
Diversified', 'Scientific,Photographic and Control Equipment', 'Chemicals', 'Securities', 'Scientific,Photographic and Control Equipment', 'Household and Personal Products', 'Semiconductors and Other Electronic Components', 'Insurance: Life, Health (Mutual)', 'Industrial Machinery', 'Financial Data Services', 'Specialty Retailers: Apparel', 'Household and Personal Products', 'Home Equipment, Furnishings', 'Insurance: Life, Health (stock)', 'Aerospace and Defense', 'Energy', 'Motor Vehicles and Parts', 'Industrial Machinery', 'Miscellaneous', 'Motor Vehicles and Parts', 'Utilities: Gas and Electric', 'Food Services', 'Scientific,Photographic and Control Equipment', 'Household and Personal Products', 'Financial Data Services', 'Financial Data Services', 'Diversified Financials', 'Internet Services and Retailing', 'Financial Data Services', 'Securities', 'Food Consumer Products', 'Mining, Crude-Oil Production', 'Computer Software', 'Food Services', 'Shipping', 'Financial Data Services', 'Railroads', 'Telecommunications', 'Chemicals', 'Specialty Retailers: Other', 'Wholesalers: Electronics and Office Equipment', 'Insurance: Property and Casualty (Mutual)', 'Home Equipment, Furnishings', 'Real estate', 'Transportation Equipment', 'Scientific,Photographic and Control Equipment', 'Real estate', 'Energy', 'Transportation and Logistics', 'Information Technology Services', 'Motor Vehicles and Parts', 'Medical Products and Equipment', 'Financial Data Services', 'Commercial Banks', 'Computer Software', 'Medical Products and Equipment', 'Specialty Retailers: Apparel', 'Semiconductors and Other Electronic Components', 'Trucking, Truck Leasing', 'Securities', 'Aerospace and Defense', 'Network and Other Communications Equipment', 'Chemicals', 'Trucking, Truck Leasing', 'Pipelines', 'Pharmaceuticals', 'Health Care: Medical Facilities', 'Tobacco', 'Industrial Machinery', 'Medical Products and Equipment', 'Motor Vehicles and Parts', 'Food and Drug Stores', 'Wholesalers: Diversified', 'Industrial Machinery', 'Computer Software', 'Building Materials, Glass', 'Household and Personal Products', 'Shipping', 'Network and Other Communications Equipment', 'Pipelines', 'Commercial Banks', 'Electronics, Electrical Equip.', 'Utilities: Gas and Electric', 'Mining, Crude-Oil Production', 'Aerospace and Defense', 'Aerospace and Defense', 'Health Care: Pharmacy and Other Services', 'Building Materials, Glass', 'Chemicals', 'Aerospace and Defense', 'Medical Products and Equipment', 'Publishing, Printing', 'Homebuilders', 'Chemicals', 'Insurance: Property and Casualty (Stock)', 'Hotels, Casinos, Resorts', 'Industrial Machinery', 'Home Equipment, Furnishings', 'Transportation and Logistics', 'Computer Software', 'Computer Software', 'Specialty Retailers: Apparel', 'Financial Data Services', 'Computer Software', 'Building Materials, Glass', 'Aerospace and Defense', 'Energy', 'Securities', 'Chemicals', 'Oil and Gas Equipment, Services', 'Mining, Crude-Oil Production', 'Industrial Machinery', 'Homebuilders', 'Semiconductors and Other Electronic Components', 'Hotels, Casinos, Resorts', 'Insurance: Life, Health (stock)', 'Engineering, Construction', 'Insurance: Property and Casualty (Stock)', 'Aerospace and Defense', 'Specialty Retailers: Other', 'Oil and Gas Equipment, Services', 'Financial Data Services', 'Specialty Retailers: Apparel', 'Entertainment', 'Health Care: Pharmacy and Other Services', 'Wholesalers: Diversified', 'Health Care: Pharmacy and Other Services', 'Financial Data Services', 'Health Care: Medical Facilities', 
'Real estate'], 'City': ['Bentonville', 'Irving', 'Omaha', 'Cupertino', 'Minnetonka', 'SF', 'Woonsocket', 'Seattle', 'Dallas', 'Detroit', 'Dearborn', 'Chesterbrook', 'San Ramon', 'Dublin', 'Issaquah', 'New York', 'Cincinnati', 'Boston', 'Deerfield', 'New York', 'Leavenworth', 'Mountain View', 'Atlanta', 'Charlotte', 'St. Louis', 'SF', 'Chicago', 'Houston', 'Indianapolis', 'Redmond', 'San Antonio', 'New York', 'Philadelphia', 'Armonk', 'Round Rock', 'Bloomington', 'New Brunswick', 'McLean', 'Minneapolis', 'Mooresville', 'Findlay', 'Cincinnati', 'New York', 'Atlanta', 'Harrison', 'Santa Clara', 'Midland', 'Chicago', 'Hartford', 'Memphis', 'Farmington', 'Newark', 'Boise', 'Houston', 'Burbank', 'Louisville', 'New York', 'Palo Alto', 'Bethesda', 'New York', 'St. Louis', 'San Jose', 'Nashville', 'Dallas', 'Deerfield', 'Columbus', 'New York', 'Boston', 'New York', 'New York', 'Fort Worth', 'Richfield', 'Bloomfield', 'Stamford', 'Atlanta', 'Menlo Park', 'Morris Plains', 'Kenilworth', 'Northbrook', 'Springdale', 'Chicago', 'Redwood City', 'Clearwater', 'New York', 'Framingham', 'New York', 'Atlanta', 'Lakeland', 'Beaverton', 'San Antonio', 'Miami', 'Chicago', 'Springfield', 'Camp Hill', 'Houston', 'Inver Grove Heights', 'St Paul', 'New York', 'Falls Church', 'San Antonio', 'McLean', 'Moline', 'New York', 'Milwaukee', 'Houston', 'New York', 'Palo Alto', 'New York', 'New York', 'North Chicago', 'Lake Bluff', 'Mayfield', 'Centennial', 'Pittsburgh', 'Houston', 'San Mateo', 'Deerfield', 'Falls Church', 'Waltham', 'Cincinnati', 'Rosemont', 'Minneapolis', 'Goodlettsville', 'Memphis', 'Charlotte', 'Atlanta', 'Bethesda', 'Phoenix', 'Indianapolis', 'Thousand Oaks', 'Oak Brook', 'Seattle', 'San Diego', 'Chesapeake', 'Parsippany-Troy Hills', 'New York', 'Columbus', 'Fort Lauderdale', 'Bloomfield Hills', 'Benton Harbor', 'Omaha', 'Dallas', 'Milwaukee', 'Waltham', 'New York', 'Houston', 'Dallas', 'Southfield', 'Columbus', 'Boise', 'Charlotte', 'Long Beach', 'Irving', 'Richmond', 'Bellevue', 'Hartford', 'Menomonee Falls', 'San Jose', 'St. Petersburg', 'Franklin', 'SF', 'Leavenworth', 'Irving', 'Los Angeles', 'Pittsburgh', 'Monroe', 'North Palm Beach', 'SF', 'Fremont', 'Tampa', 'Richmond', 'Hoffman Estates', 'Stamford', 'Richmond', 'New York City', 'Phoenix', 'Atlanta', 'St. Louis', 'Denver', 'Eden Prairie', 'SF', 'Minneapolis', 'Seattle', 'New York', 'Columbus', 'Greenwich', 'Akron', 'New York', 'Lincolnshire', 'Cleveland', 'Pittsburgh', 'Dallas', 'Eden Prairie', 'Atlanta', 'Teaneck', 'Hoboken', 'New York', 'Nashville', 'St. 
Louis', 'Philadelphia', 'Santa Clara', 'Houston', 'Englewood', 'Glenview', 'Wayne', 'Dallas', 'Los Angeles', 'Providence', 'Dublin', 'Des Moines', 'Arlington', 'New York', 'Oklahoma City', 'Arlington', 'St Paul', 'Arden Hills', 'New York', 'Houston', 'Akron', 'Houston', 'New York', 'San Jose', 'Tulsa', 'Summit', 'New York', 'Battle Creek', 'Las Vegas', 'New Britain', 'Norwalk', 'Miami', 'Columbus', 'Detroit', 'Richmond', 'Chesterfield', 'Plano', 'Harrison', 'New York', 'Melville', 'New York', 'Kalamazoo', 'New York', 'Greensboro', 'Roseland', 'Rosemead', 'Cambridge', 'Pittsburgh', 'South San Francisco', 'Union', 'Tulsa', 'Winston-Salem', 'Franklin Lakes', 'Minneapolis', 'Los Angeles', 'New York', 'New York', 'Cleveland', 'The Woodlands', 'New York', 'Boston', 'Palo Alto', 'Los Gatos', 'Pittsburgh', 'Riverwoods', 'Danbury', 'Jacksonville', 'Minneapolis', 'Chattanooga', 'King of Prussia', 'Princeton', 'Houston', 'San Diego', 'Wayne', 'Houston', 'New Orleans', 'Denver', 'New York', 'Broomfield', 'Memphis', 'El Dorado', 'Las Vegas', 'Boca Raton', 'The Woodlands', 'Deerfield', 'Norfolk', 'SF', 'Burlington', 'Lake Forest', 'Englewood', 'Auburn Hills', 'Beverly Hills', 'Norwalk', 'Reston', 'Corning', 'Medford', 'Bellevue', 'Phoenix', 'Dallas', 'Charlotte', 'Detroit', 'Chicago', 'Auburn Hills', 'Jacksonville', 'Atlanta', 'Durham', 'Los Angeles', 'Santa Clara', 'New York', 'Houston', 'San Jose', 'Kingsport', 'Madison', 'Fort Wayne', 'Newport Beach', 'Oklahoma City', 'Calhoun', 'Houston', 'Roanoke', 'Mechanicsville', 'Providence', 'Lake Forest', 'Chicago', 'Grapevine', 'Austin', 'McLean', 'Norwalk', 'Jacksonville', 'Newark', 'Marlboro', 'Springfield', 'SF', 'Waltham', 'New York', 'Parsippany-Troy Hills', 'Houston', 'Estero', 'Houston', 'Omaha', 'Philadelphia', 'Omaha', 'Coraopolis', 'Atlanta', 'Lisle', 'Minneapolis', 'Denver', 'Allentown', 'Atlanta', 'Duluth', 'Richmond', 'Downers Grove', 'New York', 'Byron Center', 'Houston', 'Tulsa', 'Fremont', 'Seattle', 'Chicago', 'Glenview', 'Camden', 'New York', 'Downers Grove', 'Warsaw', 'Dallas', 'New York', 'Springfield', 'Plano', 'Cincinnati', 'Secaucus', 'Norwalk', 'Greenwich', 'Pittsburgh', 'New York', 'Milwaukee', 'Livonia', 'Tysons', 'Lansing', 'Des Peres', 'Englewood', 'Erie', 'Hershey', 'Allentown', 'Newport News', 'Plymouth', 'Orrville', 'Brentwood', 'Greenwood Village', 'Victor', 'Miami', 'Houston', 'San Jose', 'Brentwood', 'Brentwood', 'Elkhart', 'Maumee', 'Seattle', 'Lowell', 'Orlando', 'Plano', 'New York', 'Evansville', 'Dallas', 'Santa Monica', 'Wilco inc', 'Wallingford', 'El Segundo', 'Wichita', 'Chicago', 'Melbourne', 'Seattle', 'Silver Spring', 'Perrysburg', 'San Jose', 'Cleveland', 'Cincinnati', 'Oshkosh', 'Cedar Rapids', 'Louisville', 'Tempe', 'Plano', 'Boston', 'Everett', 'New York', 'New York', 'Mahwah', 'Stamford', 'Ankeny', 'St. Louis', 'Glendale', 'Coral Gables', 'Jackson', 'Atlanta', 'St. Petersburg', 'Atlanta', 'Winston-Salem', 'Duluth', 'Providence', 'Lake Forest', 'New York', 'Houston', 'Little Rock', 'New York', 'San Mateo', 'Toledo', 'Chicago', 'Reston', 'Milwaukee', 'Oak Brook', 'Las Vegas', 'Clayton', 'Detroit', 'Chicago', 'Wilmington', 'San Antonio', 'St. 
Louis', 'Rolling Meadows', 'Irving', 'Charlotte', 'King of Prussia', 'Madison', 'Burlington', 'Birmingham', 'Beckett Ridge', 'St Paul', 'New York', 'Glen Allen', 'Westlake', 'Florham Park', 'Buffalo', 'Oakland', 'New York', 'New York', 'Bolingbrook', 'Louisville', 'Tarrytown', 'Little Rock', 'Scottsdale', 'Cincinnati', 'Atlanta', 'Westchester', 'Parsippany-Troy Hills', 'Horsham', 'Merriam', 'McLean', 'Santa Ana', 'Fairfield', 'Rye', 'Chicago', 'Brookfield', 'Milwaukee', 'Houston', 'St Paul', 'St. Louis', 'Phoenix', 'Indianapolis', 'Englewood', 'Sunnyvale', 'Medina', 'Irving', 'New York', 'Irving', 'Cincinnati', 'New York', 'Bethesda', 'Troy', 'Kennett Square', 'Irving', 'Santa Clara', 'Parsippany-Troy Hills', 'SF', 'Deerfield', 'Columbus', 'Menlo Park', 'St. Louis', 'Pawtucket', 'Worcester', 'Wilmington', 'Mountain View', 'Fort Mill', 'Houston', 'North Kansas City', 'Norwood', 'Chicago', 'Deerfield', 'Hartsville', 'Sunnyvale', 'Irving', 'Falls Church', 'Baltimore', 'Medina', 'Columbus', 'SF', 'Libertyville', 'Overland Park', 'El Segundo', 'Johnston', 'Merrillville', 'Las Vegas', 'Redwood City', 'Houston', 'Sparks Glencoe', 'Baltimore', 'Sterling', 'Los Angeles', 'Brentwood', 'Columbus', 'Boston', 'New Braunfels', 'Rye Brook', 'Dallas', 'Chicago', 'Phoenix', 'North Canton', 'Sarasota', 'Commerce', 'Newton', 'New York City', 'Flint', 'Denver', 'Santa Clara', 'Reston', 'Bismarck', 'Mechanicsburg', 'Boise', 'New York', 'Cayce', 'Atlanta', 'Winona', 'Green Bay', 'Baltimore', 'Herndon', 'Los Angeles', 'Redwood City', 'Westport', 'Houston', 'Arlington', 'Miami', 'Charlotte', 'Toledo', 'Boca Raton', 'Berwyn', 'Carmel', 'Lincolnshire', 'Boston', 'Houston', 'Tampa', 'New York', 'Mountain View', 'Tempe', 'Manhattan Beach', 'Houston', 'New York', 'McKinney', 'Lake Success', 'Sussex', 'Deerfield', 'Scottsdale', 'Stamford', 'Neenah', 'New York', 'Oak Brook', 'Cincinnati', 'Black Mountain', 'Kenosha', 'York', 'Indianapolis', 'Atlanta', 'Birmingham', 'Raleigh', 'New York', 'Carthage', 'Grand Rapids', 'Denton', 'Thomasville', 'New York', 'West Des Moines', 'Birmingham', 'Scottsdale', 'Wilmerding', 'Houston', 'Boston', 'Richardson', 'Highland Heights', 'Pittsburgh', 'Ewing Township', 'West Palm Beach', 'Charlotte', 'Evansville', 'Annapolis Junction', 'Lincolnshire', 'Maumee', 'Omaha', 'Leavenworth', 'Shelton', 'Dallas', 'Irving', 'Irving', 'Denver', 'Woburn', 'Jacksonville', 'Houston', 'Houston', 'Chicago', 'Delaware', 'The Woodlands', 'Novi', 'Philadelphia', 'Chicago', 'Southlake', 'Omaha', 'Columbus', 'Lake Forest', 'New York', 'Chicago', 'Greenville', 'Phoenix', 'Knoxville', 'New Haven', 'Stamford', 'New York', 'Bloomington', 'Madison', 'Berwyn', 'Cleveland', 'Pittsburgh', 'Catawba', 'Atlanta', 'New Albany', 'Milpitas', 'Sunbury', 'Bellevue', 'Fort Lauderdale', 'Carmel', 'Avon Lake', 'Philadelphia', 'Irvine', 'Chandler', 'Reno', 'Los Angeles', 'Galveston', 'Atlanta', 'New York', 'Atlanta', 'Madison', 'Pittsburgh', 'Corona', 'Atlanta', 'Chicago', 'Atlanta', 'Beloit', 'Thomasville', 'Voorhees Township', 'New York', 'Richmond', 'Richmond', 'Stevens Point', 'Laurel', 'Northbrook', 'Stamford', 'New York City', 'Houston', 'Rochester Hills', 'Houston', 'Dallas', 'San Leandro', 'Santa Clara', 'Covington', 'Scottsdale', 'St. 
George', 'Chicago', 'Spring', 'Santa Rosa', 'Knoxville', 'New York', 'Rochester', 'Dallas', 'Wyomissing', 'McLean', 'Van Buren Charter Township', 'Parsippany-Troy Hills', 'Sunnyvale', 'Oklahoma City', 'Houston', 'Las Vegas', 'Charlotte', 'Dallas', 'Marlboro', 'KCMO', 'Greensboro', 'Grand Rapids', 'New York', 'Columbus', 'North Canton', 'Milwaukee', 'San Diego', 'St. Louis', 'Louisville', 'Plano', 'Watsonville', 'Palm Beach Gardens', 'Norwell', 'Tempe', 'Marysville', 'Lebanon', 'San Juan', 'SF', 'Memphis', 'Merrimack', 'Nashville', 'Cleveland', 'Melville', 'Baltimore', 'Cleveland', 'New York', 'Fort Lauderdale', 'Franklin', 'Palo Alto', 'Chicago', 'Cleveland', 'Portsmouth', 'Findlay', 'Lake Success', 'Horsham', 'Los Angeles', 'Fort Smith', 'New York', 'Irvine', 'New York', 'New York', 'New York', 'Oklahoma City', 'Hanover', 'Columbus', 'Cincinnati', 'McLean', 'Covington', 'Richardson', 'Ann Arbor', 'Stamford', 'St. Louis', 'Lexington', 'Pasadena', 'San Diego', 'Omaha', 'Chicago', 'Blue Bell', 'Salt Lake City', 'Cockeysville', 'Nashville', 'Columbus', 'Mountain View', 'Chicago', 'Boston', 'KCMO', 'Plano', 'Honolulu', 'New York', 'Raleigh', 'Glendale', 'Costa Mesa', 'Evansville', 'Sunnyvale', 'Houston', 'Miramar', 'Calabasas', 'Cleveland', 'SF', 'Fort Worth', 'Thousand Oaks', 'Malvern', 'Boston', 'Cleveland', 'Arlington', 'Lincoln', 'Midland', 'White Plains', 'KCMO', 'Denver', 'Topeka', 'Las Vegas', 'Honolulu', 'McLean', 'Las Vegas', 'Farmington', 'York', 'San Jose', 'Neenah', 'Tacoma', 'Tulsa', 'Bloomington', 'Cambridge', 'Elma Center', 'Boston', 'Chicago', 'Branchville', 'Crystal Lake', 'Scottsdale', 'Portland', 'Fairlawn', 'Miamisburg', 'SF', 'Pittsburgh', 'New York', 'Red Bank', 'Reston', 'SF', 'Houston', 'Waltham', 'Corte Madera', 'Irving', 'Phoenix', 'Raleigh', 'St. Louis', 'Las Vegas', 'Dallas', 'Milwaukee', 'Minneapolis', 'Elmsford', 'New York', 'Reading', 'Los Angeles', 'Houston', 'Leavenworth', 'Rockford', 'San Jose', 'New York', 'Elgin', 'Waterford', 'Cleveland', 'Chicago', 'San Jose', 'St. 
Louis', 'Tucker', 'Milford', 'St Paul', 'West Palm Beach', 'Waltham', 'Chesterfield', 'San Jose', 'New Haven', 'Lake Forest', 'KCMO', 'Fort Myers', 'Provo', 'Zeeland', 'Montpelier', 'Charlotte', 'Wall Township', 'Milwaukee', 'Memphis', 'Boca Raton', 'Indianapolis', 'Oklahoma City', 'Calabasas', 'Raleigh', 'Orlando', 'Leawood', 'Norcross', 'Coppell', 'Scottsdale', 'Pleasanton', 'Chicago', 'Charlotte', 'El Dorado', 'Hoffman Estates', 'Louisville', 'Houston', 'SF', 'Darien', 'Boulder', 'Richmond', 'Commerce', 'El Segundo', 'Owatonna', 'Muscatine', 'Newton', 'Lake Oswego', 'Hercules', 'Arlington', 'Ames', 'Harrison', 'Dayton', 'Elkhart', 'Wayne', 'Jersey City', 'Hato Rey', 'Pleasanton', 'Pleasanton', 'Columbus', 'North Reading', 'Omaha', 'Los Angeles', 'Fort Collins', 'Seattle', 'Lexington', 'Downers Grove', 'Tulsa', 'Franklin Township', 'Brentwood', 'Richmond', 'Westlake', 'San Diego', 'Livonia', 'Memphis', 'Tustin', 'Pittsburgh', 'San Rafael', 'Cary', 'Walnut Creek', 'Honolulu', 'Englewood', 'Houston', 'Santa Clara', 'Liberty Lake', 'Portland', 'Los Angeles', 'Bellevue', 'McLean', 'San Diego', 'New York', 'Dallas', 'Stamford', 'Westbrook', 'Shoreview', 'Columbus', 'Houston', 'Houston', 'Orlando', 'Charlotte', 'Lake Zurich', 'Chicago', 'San Jose', 'Burlington', 'Indianapolis', 'Chicago', 'Santa Clara', 'Denver', 'Chantilly', 'Overland Park', 'Greenwich', 'Northfield', 'Houston', 'Denver', 'Milwaukee', 'Atlanta', 'Andover', 'Broomfield', 'Cincinnati', 'Daytona Beach', 'Daytona Beach', 'El Segundo', 'Bernards', 'Houston', 'San Jose', 'Secaucus', 'Chicago', 'Bensalem', 'Roswell', 'Wilmington', 'Irvine', 'Mission Viejo', 'Irvine'], 'State': ['AR', 'TX', 'NE', 'CA', 'MN', 'CA', 'RI', 'WA', 'TX', 'MI', 'MI', 'PA', 'CA', 'OH', 'WA', 'NY', 'OH', 'MA', 'IL', 'NY', 'WA', 'CA', 'GA', 'NC', 'MO', 'CA', 'IL', 'TX', 'IN', 'WA', 'TX', 'NY', 'PA', 'NY', 'TX', 'IL', 'NJ', 'VA', 'MN', 'NC', 'OH', 'OH', 'NY', 'GA', 'NY', 'CA', 'Michigan', 'IL', 'CT', 'TN', 'CT', 'NJ', 'ID', 'TX', 'CA', 'KY', 'NY', 'CA', 'MD', 'NY', 'MO', 'CA', 'TN', 'TX', 'IL', 'OH', 'NY', 'MA', 'NY', 'NY', 'TX', 'MN', 'CT', 'CT', 'GA', 'CA', 'NJ', 'NJ', 'IL', 'AR', 'IL', 'CA', 'FL', 'NY', 'MA', 'NY', 'GA', 'FL', 'OR', 'TX', 'FL', 'IL', 'MA', 'PA', 'TX', 'MN', 'MN', 'NY', 'VA', 'TX', 'VA', 'IL', 'NY', 'WI', 'TX', 'NY', 'CA', 'NY', 'NY', 'IL', 'IL', 'OH', 'CO', 'PA', 'TX', 'CA', 'IL', 'VA', 'MA', 'OH', 'IL', 'MN', 'TN', 'TN', 'NC', 'GA', 'MD', 'AZ', 'IN', 'CA', 'IL', 'WA', 'CA', 'VA', 'NJ', 'NY', 'GA', 'FL', 'MI', 'MI', 'NE', 'TX', 'WI', 'MA', 'NY', 'TX', 'TX', 'MI', 'IN', 'ID', 'NC', 'CA', 'TX', 'VA', 'WA', 'CT', 'WI', 'CA', 'FL', 'TN', 'CA', 'WA', 'TX', 'CA', 'PA', 'LA', 'FL', 'CA', 'CA', 'FL', 'VA', 'IL', 'CT', 'VA', 'NY', 'AZ', 'GA', 'MO', 'CO', 'MN', 'CA', 'MN', 'WA', 'NY', 'OH', 'CT', 'OH', 'NY', 'IL', 'OH', 'PA', 'TX', 'MN', 'GA', 'NJ', 'NJ', 'NY', 'TN', 'MO', 'PA', 'CA', 'TX', 'CO', 'IL', 'PA', 'TX', 'CA', 'RI', 'CA', 'IA', 'TX', 'NY', 'OK', 'VA', 'MN', 'MN', 'NY', 'TX', 'OH', 'TX', 'NY', 'CA', 'OK', 'NJ', 'NY', 'MI', 'NV', 'CT', 'CT', 'FL', 'OH', 'MI', 'VA', 'MO', 'TX', 'NY', 'NY', 'NY', 'NY', 'MI', 'NY', 'NC', 'NJ', 'CA', 'MA', 'PA', 'CA', 'NJ', 'OK', 'NC', 'NJ', 'MN', 'CA', 'NY', 'NY', 'OH', 'TX', 'NY', 'MA', 'CA', 'CA', 'PA', 'IL', 'CT', 'FL', 'MN', 'TN', 'PA', 'NJ', 'TX', 'CA', 'NJ', 'TX', 'LA', 'CO', 'NY', 'CO', 'TN', 'AR', 'NV', 'FL', 'TX', 'IL', 'VA', 'CA', 'NC', 'IL', 'CO', 'MI', 'CA', 'CT', 'VA', 'NY', 'OR', 'WA', 'AZ', 'TX', 'NC', 'MI', 'IL', 'MI', 'FL', 'GA', 'NC', 'CA', 'CA', 'NY', 'TX', 'CA', 'TN', 'WI', 'IN', 'CA', 'OK', 
'GA', 'TX', 'VA', 'VA', 'RI', 'IL', 'IL', 'TX', 'MN', 'VA', 'CT', 'FL', 'NJ', 'MA', 'MO', 'CA', 'MA', 'NY', 'NJ', 'TX', 'FL', 'TX', 'NE', 'PA', 'NE', 'PA', 'GA', 'IL', 'MN', 'CO', 'PA', 'GA', 'GA', 'VA', 'IL', 'NY', 'MI', 'TX', 'OK', 'CA', 'WA', 'IL', 'IL', 'NJ', 'NY', 'IL', 'IN', 'TX', 'NY', 'MA', 'TX', 'OH', 'NJ', 'CT', 'CT', 'PA', 'NY', 'WI', 'MI', 'VA', 'MI', 'MO', 'CO', 'PA', 'PA', 'PA', 'VA', 'MN', 'OH', 'TN', 'CO', 'NY', 'FL', 'TX', 'CA', 'TN', 'TN', 'IN', 'OH', 'WA', 'AR', 'FL', 'TX', 'NY', 'IN', 'TX', 'CA', 'NY', 'CT', 'CA', 'KS', 'IL', 'FL', 'WA', 'MD', 'OH', 'CA', 'OH', 'OH', 'WI', 'IA', 'KY', 'AZ', 'TX', 'MA', 'WA', 'NY', 'NY', 'NJ', 'CT', 'IA', 'MO', 'CA', 'FL', 'MI', 'GA', 'FL', 'GA', 'NC', 'GA', 'RI', 'IL', 'NY', 'TX', 'AR', 'NY', 'CA', 'OH', 'IL', 'VA', 'WI', 'IL', 'NV', 'MO', 'MI', 'IL', 'DE', 'TX', 'MO', 'IL', 'TX', 'NC', 'PA', 'NJ', 'NJ', 'AL', 'OH', 'MN', 'NY', 'VA', 'OH', 'NJ', 'NY', 'CA', 'NY', 'NY', 'IL', 'KY', 'NY', 'AR', 'AZ', 'OH', 'GA', 'IL', 'NJ', 'PA', 'KS', 'VA', 'CA', 'OH', 'NY', 'IL', 'WI', 'WI', 'TX', 'MN', 'MO', 'AZ', 'IN', 'CO', 'CA', 'MN', 'TX', 'NY', 'TX', 'OH', 'NY', 'MD', 'MI', 'PA', 'TX', 'CA', 'NJ', 'CA', 'IL', 'OH', 'CA', 'MO', 'RI', 'MA', 'DE', 'CA', 'SC', 'TX', 'MO', 'MA', 'IL', 'IL', 'SC', 'CA', 'TX', 'VA', 'MD', 'OH', 'GA', 'CA', 'IL', 'KS', 'CA', 'RI', 'IN', 'NV', 'CA', 'TX', 'MD', 'MD', 'VA', 'CA', 'TN', 'OH', 'MA', 'TX', 'NY', 'TX', 'IL', 'AZ', 'OH', 'FL', 'CA', 'NC', 'NY', 'MI', 'CO', 'CA', 'VA', 'ND', 'PA', 'ID', 'NY', 'SC', 'GA', 'MN', 'WI', 'MD', 'VA', 'CA', 'CA', 'CT', 'TX', 'VA', 'FL', 'NC', 'OH', 'FL', 'PA', 'IN', 'IL', 'MA', 'TX', 'FL', 'NY', 'CA', 'AZ', 'CA', 'TX', 'NY', 'TX', 'NY', 'WI', 'IL', 'AZ', 'CT', 'WI', 'NY', 'IL', 'OH', 'NC', 'WI', 'PA', 'IN', 'GA', 'AL', 'NC', 'NY', 'MO', 'MI', 'TX', 'GA', 'NY', 'IA', 'AL', 'AZ', 'PA', 'TX', 'MA', 'TX', 'KY', 'PA', 'NJ', 'FL', 'NC', 'IN', 'MD', 'IL', 'OH', 'NE', 'WA', 'CT', 'TX', 'TX', 'TX', 'CO', 'MA', 'FL', 'TX', 'TX', 'IL', 'OH', 'TX', 'MI', 'PA', 'IL', 'TX', 'NE', 'OH', 'IL', 'NY', 'IL', 'SC', 'AZ', 'TN', 'CT', 'CT', 'NY', 'IL', 'WI', 'PA', 'OH', 'PA', 'SC', 'GA', 'OH', 'CA', 'PA', 'WA', 'FL', 'IN', 'OH', 'PA', 'CA', 'AZ', 'NV', 'CA', 'TX', 'GA', 'NY', 'GA', 'WI', 'PA', 'CA', 'GA', 'IL', 'GA', 'WI', 'NC', 'NJ', 'NY', 'VA', 'VA', 'WI', 'MS', 'IL', 'CT', 'NY', 'TX', 'MI', 'TX', 'TX', 'CA', 'CA', 'KY', 'AZ', 'UT', 'IL', 'TX', 'CA', 'TN', 'NY', 'NY', 'TX', 'PA', 'VA', 'MI', 'NJ', 'CA', 'OK', 'TX', 'NV', 'NC', 'TX', 'MA', 'MO', 'NC', 'MI', 'NY', 'OH', 'OH', 'WI', 'CA', 'MO', 'KY', 'TX', 'CA', 'FL', 'MA', 'AZ', 'OH', 'TN', 'Puerto Rico', 'CA', 'TN', 'NH', 'TN', 'OH', 'NY', 'MD', 'OH', 'NY', 'FL', 'TN', 'CA', 'IL', 'OH', 'NH', 'OH', 'NY', 'PA', 'CA', 'AR', 'NY', 'CA', 'NY', 'NY', 'NY', 'OK', 'MD', 'OH', 'OH', 'VA', 'LA', 'TX', 'MI', 'CT', 'MO', 'KY', 'CA', 'CA', 'NE', 'IL', 'PA', 'UT', 'MD', 'TN', 'OH', 'CA', 'IL', 'MA', 'MO', 'TX', 'HI', 'NY', 'NC', 'CA', 'CA', 'IN', 'CA', 'TX', 'FL', 'CA', 'OH', 'CA', 'TX', 'CA', 'PA', 'MA', 'OH', 'VA', 'RI', 'TX', 'NY', 'MO', 'CO', 'KS', 'NV', 'HI', 'VA', 'NV', 'UT', 'PA', 'CA', 'WI', 'WA', 'OK', 'MN', 'MA', 'NY', 'MA', 'IL', 'NJ', 'IL', 'AZ', 'OR', 'OH', 'OH', 'CA', 'PA', 'NY', 'NJ', 'VA', 'CA', 'TX', 'MA', 'CA', 'TX', 'AZ', 'NC', 'MO', 'NV', 'TX', 'WI', 'MN', 'NY', 'NY', 'PA', 'CA', 'TX', 'WA', 'MI', 'CA', 'NY', 'IL', 'NY', 'OH', 'IL', 'CA', 'MO', 'GA', 'MA', 'MN', 'FL', 'MA', 'MO', 'CA', 'CT', 'IL', 'MO', 'FL', 'UT', 'MI', 'VT', 'NC', 'NJ', 'WI', 'TN', 'FL', 'IN', 'OK', 'CA', 'NC', 'FL', 'KS', 'GA', 'TX', 'AZ', 'CA', 'IL', 'NC', 'AR', 'IL', 'KY', 'TX', 
'CA', 'CT', 'CO', 'VA', 'CA', 'CA', 'MN', 'IA', 'MA', 'OR', 'CA', 'VA', 'IA', 'NY', 'OH', 'IN', 'PA', 'NJ', 'Puerto Rico', 'CA', 'CA', 'OH', 'MA', 'NE', 'CA', 'CO', 'WA', 'KY', 'IL', 'OK', 'NJ', 'TN', 'VA', 'OH', 'CA', 'MI', 'TN', 'CA', 'PA', 'CA', 'NC', 'CA', 'HI', 'CO', 'TX', 'CA', 'WA', 'OR', 'CA', 'WA', 'VA', 'CA', 'NY', 'TX', 'CT', 'ME', 'MN', 'OH', 'TX', 'TX', 'FL', 'NC', 'IL', 'IL', 'CA', 'MA', 'IN', 'IL', 'CA', 'CO', 'VA', 'KS', 'CT', 'IL', 'TX', 'CO', 'WI', 'GA', 'MA', 'CO', 'OH', 'FL', 'FL', 'CA', 'NJ', 'TX', 'CA', 'NJ', 'IL', 'PA', 'GA', 'MA', 'CA', 'CA', 'CA'], 'Latitude': [36.372853799999994, 32.814017699999994, 41.2565369, 37.322997799999996, 44.9211836, 37.7749295, 42.0028761, 47.6062095, 32.7766642, 42.331427000000005, 42.3222599, 40.0756627, 37.7799273, 40.0992294, 47.5301011, 40.7127753, 39.103118200000004, 42.360082500000004, 42.171136499999996, 40.7127753, 47.751074100000004, 37.3860517, 33.7489954, 35.2270869, 38.6270025, 37.7749295, 41.8781136, 29.7604267, 39.768403, 47.6739881, 29.4241219, 40.7127753, 39.9525839, 41.1264849, 30.508255100000003, 40.4842027, 40.4862157, 38.933867600000006, 44.977753, 35.5848596, 41.04422, 39.103118200000004, 40.7127753, 33.7489954, 41.0400135, 37.354107899999995, 43.623574, 41.8781136, 41.76580429999999, 35.1495343, 41.7360305, 40.735657, 43.6150186, 29.7604267, 34.18083920000001, 38.252664700000004, 40.7127753, 37.441883399999995, 38.984652000000004, 40.7127753, 38.6270025, 37.338208200000004, 36.1626638, 32.7766642, 42.171136499999996, 39.9611755, 40.7127753, 42.360082500000004, 40.7127753, 40.7127753, 32.7554883, 44.8832982, 41.826488, 41.0534302, 33.7489954, 37.4529598, 40.839592200000006, 40.6764911, 42.127526700000004, 36.18674420000001, 41.8781136, 37.4852152, 27.9658533, 40.7127753, 42.279286, 40.7127753, 33.7489954, 28.039465399999997, 45.4887993, 29.4241219, 25.7616798, 41.8781136, 42.1014831, 40.2398118, 29.7604267, 44.8480218, 44.953702899999996, 40.7127753, 38.882334, 29.4241219, 38.933867600000006, 41.5067003, 40.7127753, 43.0389025, 29.7604267, 40.7127753, 37.441883399999995, 40.7127753, 40.7127753, 42.325578, 42.304505, 41.55199520000001, 39.5807452, 40.440624799999995, 29.7604267, 37.558546500000006, 42.171136499999996, 38.882334, 42.376485200000005, 39.103118200000004, 41.9867507, 44.977753, 36.3231066, 35.1495343, 35.2270869, 33.7489954, 38.984652000000004, 33.4483771, 39.768403, 34.1705609, 41.8397865, 47.6062095, 32.715738, 36.7682088, 40.865286499999996, 40.7127753, 32.4609764, 26.122438600000002, 42.583645000000004, 42.1167065, 41.2565369, 32.7766642, 43.0389025, 42.376485200000005, 40.7127753, 29.7604267, 32.7766642, 42.473368799999996, 39.201440399999996, 43.6150186, 35.2270869, 33.770050399999995, 32.814017699999994, 37.540724600000004, 47.6101497, 41.76580429999999, 43.1788967, 37.338208200000004, 27.767600800000004, 35.9250637, 37.7749295, 47.751074100000004, 32.814017699999994, 34.0522342, 40.440624799999995, 32.5093109, 26.879781899999998, 37.7749295, 37.548269700000006, 27.950575, 37.540724600000004, 42.062991499999995, 41.0534302, 37.540724600000004, 40.7127753, 33.4483771, 33.7489954, 38.6270025, 39.739235799999996, 44.8546856, 37.7749295, 44.977753, 47.6062095, 40.7127753, 39.9611755, 41.0262417, 41.081444700000006, 40.7127753, 42.190024900000004, 41.499320000000004, 40.440624799999995, 32.7766642, 44.8546856, 33.7489954, 40.8932469, 40.7439905, 40.7127753, 36.1626638, 38.6270025, 39.9525839, 37.354107899999995, 29.7604267, 39.647765299999996, 42.069750899999995, 40.0462208, 32.7766642, 34.0522342, 
41.8239891, 37.7021521, 41.600544799999994, 32.735687, 40.7127753, 35.4675602, 38.8816208, 44.953702899999996, 45.0502435, 40.7127753, 29.7604267, 41.081444700000006, 29.7604267, 40.7127753, 37.338208200000004, 36.1539816, 40.714637599999996, 40.7127753, 42.32115220000001, 36.169941200000004, 41.6612104, 41.117744, 25.7616798, 39.9611755, 42.331427000000005, 37.540724600000004, 38.6631083, 33.0198431, 41.0400135, 40.7127753, 40.793432200000005, 40.7127753, 42.291706899999994, 40.7127753, 36.072635399999996, 40.8206555, 34.0805651, 42.373615799999996, 40.440624799999995, 37.654656, 40.697589799999996, 36.1539816, 36.0998596, 41.016763899999994, 44.977753, 34.165357, 40.7127753, 40.7127753, 41.499320000000004, 30.1658207, 40.7127753, 42.360082500000004, 37.441883399999995, 37.235807799999996, 40.440624799999995, 42.167525399999995, 41.394816999999996, 30.3321838, 44.977753, 35.0456297, 40.101285600000004, 40.3572976, 29.7604267, 32.715738, 40.925372499999995, 29.7604267, 29.9510658, 39.739235799999996, 40.7127753, 39.9205411, 35.1495343, 33.20763, 36.169941200000004, 26.3683064, 30.1658207, 42.171136499999996, 36.8507689, 37.7749295, 36.0956918, 42.2586342, 39.647765299999996, 42.6875323, 34.073620399999996, 41.117744, 38.9586307, 42.1428521, 42.3265152, 47.6101497, 33.4483771, 32.7766642, 35.2270869, 42.331427000000005, 41.8781136, 42.6875323, 30.3321838, 33.7489954, 35.980513, 34.0522342, 37.354107899999995, 40.7127753, 29.7604267, 37.338208200000004, 36.548434, 43.07305170000001, 41.079273, 33.618882899999996, 35.4675602, 34.502587, 29.7604267, 37.270970399999996, 37.6087561, 41.8239891, 42.2586342, 41.8781136, 32.9342919, 43.6666296, 38.933867600000006, 41.117744, 30.3321838, 40.735657, 42.3459271, 37.20895720000001, 37.7749295, 42.376485200000005, 40.7127753, 40.865286499999996, 29.7604267, 26.438136, 29.7604267, 41.2565369, 39.9525839, 41.2565369, 40.5184013, 33.7489954, 41.801140999999994, 44.977753, 39.739235799999996, 40.6022939, 33.7489954, 34.0028786, 37.540724600000004, 41.8089191, 40.7127753, 42.812250799999994, 29.7604267, 36.1539816, 37.548269700000006, 47.6062095, 41.8781136, 42.069750899999995, 39.9259463, 40.7127753, 41.8089191, 41.2381, 32.7766642, 40.7127753, 42.1014831, 33.0198431, 39.103118200000004, 40.7895453, 41.117744, 41.0262417, 40.440624799999995, 40.7127753, 43.0389025, 42.36837, 38.9187222, 42.732535, 38.59722, 39.647765299999996, 42.1292241, 40.2859239, 40.6022939, 37.087082099999996, 45.01051939999999, 40.843666299999995, 36.0331164, 39.6172101, 42.982563299999995, 25.7616798, 29.7604267, 37.338208200000004, 36.0331164, 36.0331164, 41.6819935, 41.5628294, 47.6062095, 36.25535429999999, 28.5383355, 33.0198431, 40.7127753, 37.9715592, 32.7766642, 34.01945429999999, 40.744679, 41.45701079999999, 33.9191799, 37.6871761, 41.8781136, 28.0836269, 47.6062095, 38.9906657, 41.556996000000005, 37.338208200000004, 41.499320000000004, 39.103118200000004, 44.024706200000004, 41.9778795, 38.252664700000004, 33.4255104, 33.0198431, 42.360082500000004, 47.9789848, 40.7127753, 40.7127753, 41.0886216, 41.0534302, 41.7317884, 38.6270025, 34.1425078, 25.72149, 42.245869, 33.7489954, 27.767600800000004, 33.7489954, 36.0998596, 34.0028786, 41.8239891, 42.2586342, 40.7127753, 29.7604267, 34.7464809, 40.7127753, 37.562991700000005, 41.6528052, 41.8781136, 38.9586307, 43.0389025, 41.8397865, 36.169941200000004, 38.6425518, 42.331427000000005, 41.8781136, 39.7390721, 29.4241219, 38.6270025, 42.0841936, 32.814017699999994, 35.2270869, 40.101285600000004, 40.7598227, 40.071222, 
33.5206608, 39.332126200000005, 44.953702899999996, 40.7127753, 37.665978, 41.4553232, 40.787878000000006, 42.8864468, 37.8043637, 40.7127753, 40.7127753, 41.6986416, 38.252664700000004, 41.076207700000005, 34.7464809, 33.494170399999994, 39.103118200000004, 33.7489954, 41.8498339, 40.865286499999996, 40.1784422, 39.023616499999996, 38.933867600000006, 33.7454725, 39.345467299999996, 40.980653499999995, 41.8781136, 43.0605671, 43.0389025, 29.7604267, 44.953702899999996, 38.6270025, 33.4483771, 39.768403, 39.647765299999996, 37.368829999999996, 45.0352411, 32.814017699999994, 40.7127753, 32.814017699999994, 39.103118200000004, 40.7127753, 38.984652000000004, 42.6064095, 39.84677670000001, 32.814017699999994, 37.354107899999995, 40.865286499999996, 37.7749295, 42.171136499999996, 39.9611755, 37.4529598, 38.6270025, 41.878710999999996, 42.262593200000005, 39.7390721, 37.3860517, 35.007369700000005, 29.7604267, 39.1429081, 42.1943909, 41.8781136, 42.171136499999996, 34.3740431, 37.368829999999996, 32.814017699999994, 38.882334, 39.2903848, 41.143245, 32.4609764, 37.7749295, 42.2333571, 38.9822282, 33.9191799, 41.820519899999994, 41.482814399999995, 36.169941200000004, 37.4852152, 29.7604267, 39.530938899999995, 39.2903848, 38.962489899999994, 34.3058279, 36.0331164, 39.9611755, 42.360082500000004, 29.7030024, 41.0192641, 32.7766642, 41.8781136, 33.4483771, 40.875890999999996, 27.3364347, 34.0005691, 35.7344538, 40.7127753, 43.012527399999996, 39.739235799999996, 37.354107899999995, 38.9586307, 46.808326799999996, 40.2142565, 43.6150186, 40.7127753, 33.9657091, 33.7489954, 44.0553908, 44.51331879999999, 39.2903848, 38.9695545, 34.0522342, 37.4852152, 41.141471700000004, 29.7604267, 38.8816208, 25.7616798, 35.2270869, 41.6528052, 26.3683064, 40.045823999999996, 39.978371, 42.190024900000004, 42.360082500000004, 29.7604267, 27.950575, 40.7127753, 37.3860517, 33.4255104, 33.884736100000005, 29.7604267, 40.7127753, 33.197246500000006, 40.7706572, 43.13418, 42.171136499999996, 33.494170399999994, 41.0534302, 44.1858193, 40.7127753, 41.8397865, 39.2807348, 35.6178951, 42.5847425, 39.9625984, 39.768403, 33.7489954, 33.5206608, 35.7795897, 40.7127753, 37.176446999999996, 42.9633599, 33.2148412, 30.8365815, 40.7127753, 41.5772115, 33.5206608, 33.494170399999994, 40.3909023, 29.7604267, 42.360082500000004, 32.948333500000004, 39.033116899999996, 40.440624799999995, 40.2599864, 26.7153424, 35.2270869, 37.9715592, 39.1202934, 42.190024900000004, 41.5628294, 41.2565369, 47.751074100000004, 41.3164856, 32.7766642, 32.814017699999994, 32.814017699999994, 39.739235799999996, 42.479261799999996, 30.3321838, 29.7604267, 29.7604267, 41.8781136, 40.2986724, 30.1658207, 42.48059, 39.9525839, 41.8781136, 32.9412363, 41.2565369, 39.9611755, 42.2586342, 40.7127753, 41.8781136, 34.8526176, 33.4483771, 35.96063839999999, 41.308274, 41.0534302, 40.7127753, 40.4842027, 43.07305170000001, 40.045823999999996, 41.499320000000004, 40.440624799999995, 34.85292329999999, 33.7489954, 40.0811745, 37.4323341, 40.862584999999996, 47.6101497, 26.1669711, 39.978371, 41.50531779999999, 39.9525839, 33.6845673, 33.3061605, 39.529632899999996, 34.0522342, 29.3013479, 33.7489954, 40.7127753, 33.7489954, 43.07305170000001, 40.440624799999995, 33.8752935, 33.7489954, 41.8781136, 33.7489954, 42.5083482, 35.8826369, 39.851944700000004, 40.7127753, 37.540724600000004, 37.540724600000004, 44.5235792, 31.694050899999997, 42.127526700000004, 41.0534302, 40.7127753, 29.7604267, 42.6583661, 30.0575359, 32.7766642, 37.7249296, 37.354107899999995, 
39.083671200000005, 33.494170399999994, 37.0965278, 41.8781136, 30.079940500000003, 38.440428999999995, 35.96063839999999, 40.7127753, 43.156577899999995, 32.7766642, 40.329537, 38.933867600000006, 42.2203171, 40.865286499999996, 37.368829999999996, 35.4675602, 29.7604267, 36.169941200000004, 35.2270869, 32.7766642, 42.3459271, 39.0997265, 36.072635399999996, 42.9633599, 40.7127753, 39.9611755, 40.875890999999996, 43.0389025, 32.715738, 38.6270025, 38.252664700000004, 33.0198431, 36.910231, 26.8233946, 42.1615157, 33.4255104, 40.2364486, 36.2081098, 18.465539399999997, 37.7749295, 35.1495343, 42.867869299999995, 36.1626638, 41.499320000000004, 40.793432200000005, 39.2903848, 41.499320000000004, 40.7127753, 26.122438600000002, 35.9250637, 37.441883399999995, 41.8781136, 41.499320000000004, 43.071755200000005, 41.04422, 40.7706572, 40.1784422, 34.0522342, 35.385924200000005, 40.7127753, 33.6845673, 40.7127753, 40.7127753, 40.7127753, 35.4675602, 39.1955042, 39.9611755, 39.103118200000004, 38.933867600000006, 30.4754702, 32.948333500000004, 42.2808256, 41.0534302, 38.6270025, 38.040583700000006, 34.1477849, 32.715738, 41.2565369, 41.8781136, 40.1523309, 40.760779299999996, 39.490001299999996, 36.1626638, 39.9611755, 37.3860517, 41.8781136, 42.360082500000004, 39.0997265, 33.0198431, 21.3069444, 40.7127753, 35.7795897, 34.1425078, 33.6412156, 37.9715592, 37.368829999999996, 29.7604267, 25.9860762, 34.1367208, 41.499320000000004, 37.7749295, 32.7554883, 34.1705609, 40.0362184, 42.360082500000004, 41.499320000000004, 38.8816208, 41.911012299999996, 31.9973456, 41.0339862, 39.0997265, 39.739235799999996, 39.0473451, 36.169941200000004, 21.3069444, 38.933867600000006, 36.169941200000004, 40.9804999, 39.9625984, 37.338208200000004, 44.1858193, 47.252876799999996, 36.1539816, 44.840798, 42.373615799999996, 42.82122879999999, 42.360082500000004, 41.8781136, 41.1464852, 42.2411344, 33.494170399999994, 45.5122308, 41.127833, 39.642836200000005, 37.7749295, 40.440624799999995, 40.7127753, 40.347054299999996, 38.9586307, 37.7749295, 29.7604267, 42.376485200000005, 37.92548060000001, 32.814017699999994, 33.4483771, 35.7795897, 38.6270025, 36.169941200000004, 32.7766642, 43.0389025, 44.977753, 41.055096899999995, 40.7127753, 40.335648299999995, 34.0522342, 29.7604267, 47.751074100000004, 43.1200272, 37.338208200000004, 40.7127753, 42.035408399999994, 42.7925777, 41.499320000000004, 41.8781136, 37.338208200000004, 38.6270025, 33.8545479, 42.13985770000001, 44.953702899999996, 26.7153424, 42.376485200000005, 38.6631083, 37.338208200000004, 41.308274, 42.2586342, 39.0997265, 26.640628000000003, 40.233843799999995, 42.8125246, 44.2600593, 35.2270869, 40.160666600000006, 43.0389025, 35.1495343, 26.3683064, 39.768403, 35.4675602, 34.1372953, 35.7795897, 28.5383355, 38.966673, 33.969864, 32.954568699999996, 33.494170399999994, 37.6624312, 41.8781136, 35.2270869, 33.20763, 42.062991499999995, 38.252664700000004, 29.7604267, 37.7749295, 41.0771914, 40.0149856, 37.540724600000004, 34.0005691, 33.9191799, 44.085557200000004, 41.424473, 42.337041299999996, 45.4156817, 38.017144099999996, 38.8816208, 42.03078120000001, 41.0400135, 39.758947799999994, 41.6819935, 40.041599600000005, 40.7281575, 18.4225782, 37.6624312, 37.6624312, 39.9611755, 42.5750939, 41.2565369, 34.0522342, 40.5852602, 47.6062095, 38.040583700000006, 41.8089191, 36.1539816, 40.497603999999995, 36.0331164, 37.540724600000004, 41.4553232, 32.715738, 42.36837, 35.1495343, 33.7420005, 40.440624799999995, 37.9735346, 35.791540000000005, 37.9100783, 
21.3069444, 39.647765299999996, 29.7604267, 37.354107899999995, 47.6743428, 45.5122308, 34.0522342, 47.6101497, 38.933867600000006, 32.715738, 40.7127753, 32.7766642, 41.0534302, 43.6770252, 45.0791325, 39.9611755, 29.7604267, 29.7604267, 28.5383355, 35.2270869, 42.1969689, 41.8781136, 37.338208200000004, 42.5047161, 39.768403, 41.8781136, 37.354107899999995, 39.739235799999996, 38.8942786, 38.9822282, 41.0262417, 42.09975, 29.7604267, 39.739235799999996, 43.0389025, 33.7489954, 42.6583356, 39.9205411, 39.103118200000004, 29.2108147, 29.2108147, 33.9191799, 40.7066174, 29.7604267, 37.338208200000004, 40.7895453, 41.8781136, 40.0994425, 34.0232431, 42.5481714, 33.6845673, 33.596891299999996, 33.6845673], 'Longitude': [-94.2088172, -96.9488945, -95.93450340000001, -122.03218229999999, -93.46874890000001, -122.4194155, -71.5147839, -122.33207079999998, -96.7969879, -83.0457538, -83.1763145, -75.4590816, -121.9780153, -83.11407709999999, -122.03261909999999, -74.0059728, -84.5120196, -71.0588801, -87.8445119, -74.0059728, -120.7401385, -122.08385109999999, -84.3879824, -80.8431267, -90.1994042, -122.4194155, -87.62979820000001, -95.36980279999999, -86.158068, -122.12151200000001, -98.4936282, -74.0059728, -75.1652215, -73.71401949999999, -97.678896, -88.9936873, -74.4518188, -77.17726040000001, -93.2650108, -80.81007240000001, -83.6499321, -84.5120196, -74.0059728, -84.3879824, -73.71444770000001, -121.9552356, -84.232105, -87.62979820000001, -72.6733723, -90.0489801, -72.795027, -74.1723667, -116.2023137, -95.36980279999999, -118.30896609999998, -85.7584557, -74.0059728, -122.14301950000001, -77.09470920000001, -74.0059728, -90.1994042, -121.88632859999998, -86.78160159999999, -96.7969879, -87.8445119, -82.9987942, -74.0059728, -71.0588801, -74.0059728, -74.0059728, -97.3307658, -93.28300209999999, -72.73009449999999, -73.53873409999999, -84.3879824, -122.1817252, -74.4818698, -74.2907032, -87.82895479999999, -94.1288141, -87.62979820000001, -122.2363548, -82.8001026, -74.0059728, -71.4161565, -74.0059728, -84.3879824, -81.9498042, -122.80133319999999, -98.4936282, -80.1917902, -87.62979820000001, -72.589811, -76.91997420000001, -95.36980279999999, -93.0427153, -93.0899578, -74.0059728, -77.17109140000001, -98.4936282, -77.17726040000001, -90.5151342, -74.0059728, -87.9064736, -95.36980279999999, -74.0059728, -122.14301950000001, -74.0059728, -74.0059728, -87.8411818, -87.89607120000001, -81.4392828, -104.87717260000001, -79.9958864, -95.36980279999999, -122.2710788, -87.8445119, -77.17109140000001, -71.2356113, -84.5120196, -87.87216020000001, -93.2650108, -86.7133302, -90.0489801, -80.8431267, -84.3879824, -77.09470920000001, -112.07403729999999, -86.158068, -118.83759369999999, -87.9535534, -122.33207079999998, -117.1610838, -76.2874927, -74.41738769999999, -74.0059728, -84.9877094, -80.13731740000001, -83.24548829999999, -86.4541894, -95.93450340000001, -96.7969879, -87.9064736, -71.2356113, -74.0059728, -95.36980279999999, -96.7969879, -83.2218731, -85.9213796, -116.2023137, -80.8431267, -118.1937395, -96.9488945, -77.4360481, -122.2015159, -72.6733723, -88.1173132, -121.88632859999998, -82.6402915, -86.86888990000001, -122.4194155, -120.7401385, -96.9488945, -118.24368490000002, -79.9958864, -92.1193012, -80.0533743, -122.4194155, -121.98857190000001, -82.4571776, -77.4360481, -88.1227199, -73.53873409999999, -77.4360481, -74.0059728, -112.07403729999999, -84.3879824, -90.1994042, -104.990251, -93.470786, -122.4194155, -93.2650108, -122.33207079999998, -74.0059728, -82.9987942, 
-73.62819640000001, -81.51900529999999, -74.0059728, -87.90840390000001, -81.6943605, -79.9958864, -96.7969879, -93.470786, -84.3879824, -74.0116536, -74.0323626, -74.0059728, -86.78160159999999, -90.1994042, -75.1652215, -121.9552356, -95.36980279999999, -104.98775970000001, -87.7878408, -75.3599105, -96.7969879, -118.24368490000002, -71.4128343, -121.9357918, -93.6091064, -97.10806559999999, -74.0059728, -97.5164276, -77.0909809, -93.0899578, -93.15661120000001, -74.0059728, -95.36980279999999, -81.51900529999999, -95.36980279999999, -74.0059728, -121.88632859999998, -95.992775, -74.3646122, -74.0059728, -85.17971419999999, -115.13982959999998, -72.77954190000001, -73.4081575, -80.1917902, -82.9987942, -83.0457538, -77.4360481, -90.5770675, -96.6988856, -73.71444770000001, -74.0059728, -73.4151214, -74.0059728, -85.5872286, -74.0059728, -79.79197540000001, -74.2937594, -118.072846, -71.10973349999999, -79.9958864, -122.40774979999999, -74.2631635, -95.992775, -80.24421600000001, -74.2057011, -93.2650108, -118.6089752, -74.0059728, -74.0059728, -81.6943605, -95.4612625, -74.0059728, -71.0588801, -122.14301950000001, -121.96237509999999, -79.9958864, -87.897014, -73.45401109999999, -81.655651, -93.2650108, -85.3096801, -75.38355250000001, -74.66722259999999, -95.36980279999999, -117.1610838, -74.2765441, -95.36980279999999, -90.0715323, -104.990251, -74.0059728, -105.0866504, -90.0489801, -92.66626740000001, -115.13982959999998, -80.1289321, -95.4612625, -87.8445119, -76.28587259999999, -122.4194155, -79.43779909999999, -87.840625, -104.98775970000001, -83.23410279999999, -118.40035630000001, -73.4081575, -77.35700279999999, -77.05469029999999, -122.8755949, -122.2015159, -112.07403729999999, -96.7969879, -80.8431267, -83.0457538, -87.62979820000001, -83.23410279999999, -81.655651, -84.3879824, -78.90511, -118.24368490000002, -121.9552356, -74.0059728, -95.36980279999999, -121.88632859999998, -82.5618186, -89.4012302, -85.13935129999999, -117.9298493, -97.5164276, -84.9510542, -95.36980279999999, -79.9414266, -77.37331390000001, -71.4128343, -87.840625, -87.62979820000001, -97.07806540000001, -92.9746367, -77.17726040000001, -73.4081575, -81.655651, -74.1723667, -71.55228740000001, -93.2922989, -122.4194155, -71.2356113, -74.0059728, -74.41738769999999, -95.36980279999999, -81.8067523, -95.36980279999999, -95.93450340000001, -75.1652215, -95.93450340000001, -80.1667247, -84.3879824, -88.0747875, -93.2650108, -104.990251, -75.4714098, -84.3879824, -84.1446376, -77.4360481, -88.01117459999999, -74.0059728, -85.7228061, -95.36980279999999, -95.992775, -121.98857190000001, -122.33207079999998, -87.62979820000001, -87.7878408, -75.1196199, -74.0059728, -88.01117459999999, -85.85304690000001, -96.7969879, -74.0059728, -72.589811, -96.6988856, -84.5120196, -74.05652979999999, -73.4081575, -73.62819640000001, -79.9958864, -74.0059728, -87.9064736, -83.3527097, -77.2310925, -84.55553470000001, -90.448126, -104.98775970000001, -80.085059, -76.6502468, -75.4714098, -76.4730122, -93.4555093, -81.7640212, -86.7827772, -104.95081409999999, -77.40887940000002, -80.1917902, -95.36980279999999, -121.88632859999998, -86.7827772, -86.7827772, -85.9766671, -83.6538244, -122.33207079999998, -94.1307587, -81.3792365, -96.6988856, -74.0059728, -87.5710898, -96.7969879, -118.4911912, -73.94854240000001, -72.82307359999999, -118.4164652, -97.330053, -87.62979820000001, -80.6081089, -122.33207079999998, -77.026088, -83.627157, -121.88632859999998, -81.6943605, -84.5120196, -88.5426136, -91.66562320000001, 
-85.7584557, -111.9400054, -96.6988856, -71.0588801, -122.2020795, -74.0059728, -74.0059728, -74.1435843, -73.53873409999999, -93.6001278, -90.1994042, -118.255075, -80.2683838, -84.4013462, -84.3879824, -82.6402915, -84.3879824, -80.24421600000001, -84.1446376, -71.4128343, -87.840625, -74.0059728, -95.36980279999999, -92.2895948, -74.0059728, -122.32552539999999, -83.53786740000001, -87.62979820000001, -77.35700279999999, -87.9064736, -87.9535534, -115.13982959999998, -90.32372629999999, -83.0457538, -87.62979820000001, -75.5397878, -98.4936282, -90.1994042, -88.0131275, -96.9488945, -80.8431267, -75.38355250000001, -74.417097, -74.8648873, -86.80249, -84.41726659999999, -93.0899578, -74.0059728, -77.5063739, -81.9179173, -74.38820720000001, -78.8783689, -122.27111370000002, -74.0059728, -74.0059728, -88.0683955, -85.7584557, -73.85874609999999, -92.2895948, -111.9260519, -84.5120196, -84.3879824, -87.8806738, -74.41738769999999, -75.1285061, -94.69357009999999, -77.17726040000001, -117.867653, -84.56031870000001, -73.6837399, -87.62979820000001, -88.1064787, -87.9064736, -95.36980279999999, -93.0899578, -90.1994042, -112.07403729999999, -86.158068, -104.98775970000001, -122.03634960000001, -93.5824586, -96.9488945, -74.0059728, -96.9488945, -84.5120196, -74.0059728, -77.09470920000001, -83.1497751, -75.7116032, -96.9488945, -121.9552356, -74.41738769999999, -122.4194155, -87.8445119, -82.9987942, -122.1817252, -90.1994042, -71.38255579999999, -71.8022934, -75.5397878, -122.08385109999999, -80.9450759, -95.36980279999999, -94.5729781, -71.19896949999999, -87.62979820000001, -87.8445119, -80.0734005, -122.03634960000001, -96.9488945, -77.17109140000001, -76.6121893, -81.8552196, -84.9877094, -122.4194155, -87.9259058, -94.6707917, -118.4164652, -71.512617, -87.3328139, -115.13982959999998, -122.2363548, -95.36980279999999, -76.6458043, -76.6121893, -77.4380485, -118.45719740000001, -86.7827772, -82.9987942, -71.0588801, -98.1244531, -73.68346209999999, -96.7969879, -87.62979820000001, -112.07403729999999, -81.40233559999999, -82.5306527, -118.1597929, -81.3444573, -74.0059728, -83.6874562, -104.990251, -121.9552356, -77.35700279999999, -100.7837392, -77.0085876, -116.2023137, -74.0059728, -81.0739827, -84.3879824, -91.6663523, -88.0132958, -76.6121893, -77.38609759999999, -118.24368490000002, -122.2363548, -73.35790490000001, -95.36980279999999, -77.0909809, -80.1917902, -80.8431267, -83.53786740000001, -80.1289321, -75.4395931, -86.1180435, -87.90840390000001, -71.0588801, -95.36980279999999, -82.4571776, -74.0059728, -122.08385109999999, -111.9400054, -118.41090890000001, -95.36980279999999, -74.0059728, -96.6397822, -73.7176312, -88.22294000000001, -87.8445119, -111.9260519, -73.53873409999999, -88.462609, -74.0059728, -87.9535534, -84.3173878, -82.3212302, -87.8211854, -76.727745, -86.158068, -84.3879824, -86.80249, -78.6381787, -74.0059728, -94.3102228, -85.6680863, -97.13306829999999, -83.9787808, -74.0059728, -93.711332, -86.80249, -111.9260519, -79.8100472, -95.36980279999999, -71.0588801, -96.72985190000001, -84.45188540000001, -79.9958864, -74.7909125, -80.0533746, -80.8431267, -87.5710898, -76.7769324, -87.90840390000001, -83.6538244, -95.93450340000001, -120.7401385, -73.0931641, -96.7969879, -96.9488945, -96.9488945, -104.990251, -71.1522765, -81.655651, -95.36980279999999, -95.36980279999999, -87.62979820000001, -83.067965, -95.4612625, -83.47549129999999, -75.1652215, -87.62979820000001, -97.13417829999999, -95.93450340000001, -82.9987942, -87.840625, -74.0059728, 
-87.62979820000001, -82.3940104, -112.07403729999999, -83.9207392, -72.9278835, -73.53873409999999, -74.0059728, -88.9936873, -89.4012302, -75.4395931, -81.6943605, -79.9958864, -80.9111862, -84.3879824, -82.8087864, -121.8995741, -76.7944104, -122.2015159, -80.25659499999999, -86.1180435, -82.02820009999999, -75.1652215, -117.82650490000002, -111.8412502, -119.8138027, -118.24368490000002, -94.7976958, -84.3879824, -74.0059728, -84.3879824, -89.4012302, -79.9958864, -117.56643840000001, -84.3879824, -87.62979820000001, -84.3879824, -89.0317765, -80.0819879, -74.961517, -74.0059728, -77.4360481, -77.4360481, -89.574563, -89.1306124, -87.82895479999999, -73.53873409999999, -74.0059728, -95.36980279999999, -83.14993220000001, -95.19029859999999, -96.7969879, -122.1560768, -121.9552356, -84.5085536, -111.9260519, -113.5684164, -87.62979820000001, -95.41716009999999, -122.7140548, -83.9207392, -74.0059728, -77.6088465, -96.7969879, -75.96521170000001, -77.17726040000001, -83.4838244, -74.41738769999999, -122.03634960000001, -97.5164276, -95.36980279999999, -115.13982959999998, -80.8431267, -96.7969879, -71.55228740000001, -94.5785667, -79.79197540000001, -85.6680863, -74.0059728, -82.9987942, -81.40233559999999, -87.9064736, -117.1610838, -90.1994042, -85.7584557, -96.6988856, -121.7568946, -80.1386547, -70.7927832, -111.9400054, -83.3671432, -86.29110240000001, -66.1057355, -122.4194155, -90.0489801, -71.4948322, -86.78160159999999, -81.6943605, -73.4151214, -76.6121893, -81.6943605, -74.0059728, -80.13731740000001, -86.86888990000001, -122.14301950000001, -87.62979820000001, -81.6943605, -70.7625532, -83.6499321, -73.7176312, -75.1285061, -118.24368490000002, -94.39854749999999, -74.0059728, -117.82650490000002, -74.0059728, -74.0059728, -74.0059728, -97.5164276, -76.72282270000001, -82.9987942, -84.5120196, -77.17726040000001, -90.1009108, -96.72985190000001, -83.7430378, -73.53873409999999, -90.1994042, -84.50371640000002, -118.1445155, -117.1610838, -95.93450340000001, -87.62979820000001, -75.266289, -111.89104740000002, -76.6585074, -86.78160159999999, -82.9987942, -122.08385109999999, -87.62979820000001, -71.0588801, -94.5785667, -96.6988856, -157.8583333, -74.0059728, -78.6381787, -118.255075, -117.91882209999999, -87.5710898, -122.03634960000001, -95.36980279999999, -80.3035602, -118.66148090000002, -81.6943605, -122.4194155, -97.3307658, -118.83759369999999, -75.5138118, -71.0588801, -81.6943605, -77.0909809, -71.4418101, -102.0779146, -73.76290970000001, -94.5785667, -104.990251, -95.67515759999999, -115.13982959999998, -157.8583333, -77.17726040000001, -115.13982959999998, -111.8874392, -76.727745, -121.88632859999998, -88.462609, -122.4442906, -95.992775, -93.29827990000001, -71.10973349999999, -78.63419959999999, -71.0588801, -87.62979820000001, -74.7523874, -88.3161965, -111.9260519, -122.6587185, -81.609844, -84.2866083, -122.4194155, -79.9958864, -74.0059728, -74.0643065, -77.35700279999999, -122.4194155, -95.36980279999999, -71.2356113, -122.5274755, -96.9488945, -112.07403729999999, -78.6381787, -90.1994042, -115.13982959999998, -96.7969879, -87.9064736, -93.2650108, -73.8201337, -74.0059728, -75.9268747, -118.24368490000002, -95.36980279999999, -120.7401385, -85.5600316, -121.88632859999998, -74.0059728, -88.2825668, -73.6812293, -81.6943605, -87.62979820000001, -121.88632859999998, -90.1994042, -84.21714240000001, -71.51630490000001, -93.0899578, -80.0533746, -71.2356113, -90.5770675, -121.88632859999998, -72.9278835, -87.840625, -94.5785667, -81.87230840000001, 
-111.6585337, -86.018651, -72.57538690000001, -80.8431267, -74.0679753, -87.9064736, -90.0489801, -80.1289321, -86.158068, -97.5164276, -118.6541895, -78.6381787, -81.3792365, -94.6169012, -84.2212938, -97.01500779999999, -111.9260519, -121.8746789, -87.62979820000001, -80.8431267, -92.66626740000001, -88.1227199, -85.7584557, -95.36980279999999, -122.4194155, -73.4686858, -105.2705456, -77.4360481, -118.1597929, -118.4164652, -93.2259349, -91.0432051, -71.2092214, -122.7159726, -122.28858079999999, -77.0909809, -93.63191309999999, -73.71444770000001, -84.1916069, -85.9766671, -75.3698895, -74.0776417, -66.0509549, -121.8746789, -121.8746789, -82.9987942, -71.0786653, -95.93450340000001, -118.24368490000002, -105.084423, -122.33207079999998, -84.50371640000002, -88.01117459999999, -95.992775, -74.4884868, -86.7827772, -77.4360481, -81.9179173, -117.1610838, -83.3527097, -90.0489801, -117.82363909999998, -79.9958864, -122.5310874, -78.78111690000001, -122.06518190000001, -157.8583333, -104.98775970000001, -95.36980279999999, -121.9552356, -117.1124241, -122.6587185, -118.24368490000002, -122.2015159, -77.17726040000001, -117.1610838, -74.0059728, -96.7969879, -73.53873409999999, -70.3711617, -93.1471667, -82.9987942, -95.36980279999999, -95.36980279999999, -81.3792365, -80.8431267, -88.0934108, -87.62979820000001, -121.88632859999998, -71.19562049999999, -86.158068, -87.62979820000001, -121.9552356, -104.990251, -77.4310992, -94.6707917, -73.62819640000001, -87.7808967, -95.36980279999999, -104.990251, -87.9064736, -84.3879824, -71.1367953, -105.0866504, -84.5120196, -81.02283309999999, -81.02283309999999, -118.4164652, -74.54932840000001, -95.36980279999999, -121.88632859999998, -74.05652979999999, -87.62979820000001, -74.9325683, -84.36155550000001, -71.17244670000001, -117.82650490000002, -117.6581562, -117.82650490000002]}
| 55,236.5 | 110,472 | 0.712165 |
data={'title': ['Walmart', 'Exxon Mobil', 'Berkshire Hathaway', 'Apple', 'UnitedHealth Group', 'McKesson', 'CVS Health', 'Amazon.com', 'AT&T', 'General Motors', 'Ford Motor', 'AmerisourceBergen', 'Chevron', 'Cardinal Health', 'Costco', 'Verizon', 'Kroger', 'General Electric', 'Walgreens Boots Alliance', 'JPMorgan Chase', 'Fannie Mae', 'Alphabet', 'Home Depot', 'Bank of America Corp.', 'Express Scripts Holding', 'Wells Fargo', 'Boeing', 'Phillips 66', 'Anthem', 'Microsoft', 'Valero Energy', 'Citigroup', 'Comcast', 'IBM', 'Dell Technologies', 'State Farm Insurance Cos.', 'Johnson & Johnson', 'Freddie Mac', 'Target', 'Lowes', 'Marathon Petroleum', 'Procter & Gamble', 'MetLife', 'UPS', 'PepsiCo', 'Intel', 'DowDuPont', 'Archer Daniels Midland', 'Aetna', 'FedEx', 'United Technologies', 'Prudential Financial', 'Albertsons Cos.', 'Sysco', 'Disney', 'Humana', 'Pfizer', 'HP', 'Lockheed Martin', 'AIG', 'Centene', 'Cisco Systems', 'HCA Healthcare', 'Energy Transfer Equity', 'Caterpillar', 'Nationwide', 'Morgan Stanley', 'Liberty Mutual Insurance Group', 'New York Life Insurance', 'Goldman Sachs Group', 'American Airlines Group', 'Best Buy', 'Cigna', 'Charter Communications', 'Delta Air Lines', 'Facebook', 'Honeywell International', 'Merck', 'Allstate', 'Tyson Foods', 'United Continental Holdings', 'Oracle', 'Tech Data', 'TIAA', 'TJX', 'American Express', 'Coca-Cola', 'Publix Super Markets', 'Nike', 'Andeavor', 'World Fuel Services', 'Exelon', 'Massachusetts Mutual Life Insurance', 'Rite Aid', 'ConocoPhillips', 'CHS', '3M', 'Time Warner', 'General Dynamics', 'USAA', 'Capital One Financial', 'Deere', 'INTL FCStone', 'Northwestern Mutual', 'Enterprise Products Partners', 'Travelers Cos.', 'Hewlett Packard Enterprise', 'Philip Morris International', 'Twenty-First Century Fox', 'AbbVie', 'Abbott Laboratories', 'Progressive', 'Arrow Electronics', 'Kraft Heinz', 'Plains GP Holdings', 'Gilead Sciences', 'Mondelez International', 'Northrop Grumman', 'Raytheon', 'Macys', 'US Foods Holding', 'U.S. Bancorp', 'Dollar General', 'International Paper', 'Duke Energy', 'Southern', 'Marriott International', 'Avnet', 'Eli Lilly', 'Amgen', 'McDonalds', 'Starbucks', 'Qualcomm', 'Dollar Tree', 'PBF Energy', 'Icahn Enterprises', 'Aflac', 'AutoNation', 'Penske Automotive Group', 'Whirlpool', 'Union Pacific', 'Southwest Airlines', 'ManpowerGroup', 'Thermo Fisher Scientific', 'Bristol-Myers Squibb', 'Halliburton', 'Tenet Healthcare', 'Lear', 'Cummins', 'Micron Technology', 'Nucor', 'Molina Healthcare', 'Fluor', 'Altria Group', 'Paccar', 'Hartford Financial Services', 'Kohls', 'Western Digital', 'Jabil', 'Community Health Systems', 'Visa', 'Danaher', 'Kimberly-Clark', 'AECOM', 'PNC Financial Services', 'CenturyLink', 'NextEra Energy', 'PG& E Corp.', 'Synnex', 'WellCare Health Plans', 'Performance Food Group', 'Sears Holdings', 'Synchrony Financial', 'CarMax', 'Bank of New York Mellon', 'Freeport-McMoRan', 'Genuine Parts', 'Emerson Electric', 'DaVita', 'Supervalu', 'Gap', 'General Mills', 'Nordstrom', 'Colgate-Palmolive', 'American Electric Power', 'XPO Logistics', 'Goodyear Tire & Rubber', 'Omnicom Group', 'CDW', 'Sherwin-Williams', 'PPG Industries', 'Texas Instruments', 'C.H. Robinson Worldwide', 'WestRock', 'Cognizant Technology Solutions', 'Newell Brands', 'CBS', 'Envision Healthcare', 'Monsanto', 'Aramark', 'Applied Materials', 'Waste Management', 'DISH Network', 'Illinois Tool Works', 'Lincoln National', 'HollyFrontier', 'CBRE Group', 'Textron', 'Ross Stores', 'Principal Financial', 'D.R. 
Horton', 'Marsh & McLennan', 'Devon Energy', 'AES', 'Ecolab', "Land O'Lakes", 'Loews', 'Kinder Morgan', 'FirstEnergy', 'Occidental Petroleum', 'Viacom', 'PayPal Holdings', 'NGL Energy Partners', 'Celgene', 'Arconic', 'Kellogg', 'Las Vegas Sands', 'Stanley Black & Decker', 'Booking Holdings', 'Lennar', 'L Brands', 'DTE Energy', 'Dominion Energy', 'Reinsurance Group of America', 'J.C. Penney', 'Mastercard', 'BlackRock', 'Henry Schein', 'Guardian Life Ins. Co. of America', 'Stryker', 'Jefferies Financial Group', 'VF', 'ADP', 'Edison International', 'Biogen', 'United States Steel', 'Core-Mark Holding', 'Bed Bath & Beyond', 'Oneok', 'BB& T Corp.', 'Becton Dickinson', 'Ameriprise Financial', 'Farmers Insurance Exchange', 'First Data', 'Consolidated Edison', 'Parker-Hannifin', 'Anadarko Petroleum', 'Estee Lauder', 'State Street Corp.', 'Tesla', 'Netflix', 'Alcoa', 'Discover Financial Services', 'Praxair', 'CSX', 'Xcel Energy', 'Unum Group', 'Universal Health Services', 'NRG Energy', 'EOG Resources', 'Sempra Energy', "Toys 'R'ù Us", 'Group 1 Automotive', 'Entergy', 'Molson Coors Brewing', 'L3 Technologies', 'Ball', 'AutoZone', 'Murphy USA', 'MGM Resorts International', 'Office Depot', 'Huntsman', 'Baxter International', 'Norfolk Southern', 'salesforce.com', 'Laboratory Corp. of America', 'W.W. Grainger', 'Qurate Retail', 'Autoliv', 'Live Nation Entertainment', 'Xerox', 'Leidos Holdings', 'Corning', 'Lithia Motors', 'Expedia Group', 'Republic Services', 'Jacobs Engineering Group', 'Sonic Automotive', 'Ally Financial', 'LKQ', 'BorgWarner', 'Fidelity National Financial', 'SunTrust Banks', 'IQVIA Holdings', 'Reliance Steel & Aluminum', 'Nvidia', 'Voya Financial', 'CenterPoint Energy', 'eBay', 'Eastman Chemical', 'American Family Insurance Group', 'Steel Dynamics', 'Pacific Life', 'Chesapeake Energy', 'Mohawk Industries', 'Quanta Services', 'Advance Auto Parts', 'Owens & Minor', 'United Natural Foods', 'Tenneco', 'Conagra Brands', 'GameStop', 'Hormel Foods', 'Hilton Worldwide Holdings', 'Frontier Communications', 'Fidelity National Information Services', 'Public Service Enterprise Group', 'Boston Scientific', 'OReilly Automotive', 'Charles Schwab', 'Global Partners', 'PVH', 'Avis Budget Group', 'Targa Resources', 'Hertz Global Holdings', 'Calpine', 'Mutual of Omaha Insurance', 'Crown Holdings', 'Peter Kiewit Sons', 'Dicks Sporting Goods', 'PulteGroup', 'Navistar International', 'Thrivent Financial for Lutherans', 'DCP Midstream', 'Air Products & Chemicals', 'Veritiv', 'AGCO', 'Genworth Financial', 'Univar', 'News Corp.', 'SpartanNash', 'Westlake Chemical', 'Williams', 'Lam Research', 'Alaska Air Group', 'Jones Lang LaSalle', 'Anixter International', 'Campbell Soup', 'Interpublic Group', 'Dover', 'Zimmer Biomet Holdings', 'Dean Foods', 'Foot Locker', 'Eversource Energy', 'Alliance Data Systems', 'Fifth Third Bancorp', 'Quest Diagnostics', 'EMCOR Group', 'W.R. Berkley', 'WESCO International', 'Coty', 'WEC Energy Group', 'Masco', 'DXC Technology', 'Auto-Owners Insurance', 'Jones Financial (Edward Jones)', 'Liberty Media', 'Erie Insurance Group', 'Hershey', 'PPL', 'Huntington Ingalls Industries', 'Mosaic', 'J.M. Smucker', 'Delek US Holdings', 'Newmont Mining', 'Constellation Brands', 'Ryder System', 'National Oilwell Varco', 'Adobe Systems', 'LifePoint Health', 'Tractor Supply', 'Thor Industries', 'Dana', 'Weyerhaeuser', 'J.B. 
Hunt Transport Services', 'Darden Restaurants', 'Yum China Holdings', 'Blackstone Group', 'Berry Global Group', 'Builders FirstSource', 'Activision Blizzard', 'JetBlue Airways', 'Amphenol', 'A-Mark Precious Metals', 'Spirit AeroSystems Holdings', 'R.R. Donnelley & Sons', 'Harris', 'Expeditors Intl. of Washington', 'Discovery', 'Owens-Illinois', 'Sanmina', 'KeyCorp', 'American Financial Group', 'Oshkosh', 'Rockwell Collins', 'Kindred Healthcare', 'Insight Enterprises', 'Dr Pepper Snapple Group', 'American Tower', 'Fortive', 'Ralph Lauren', 'HRG Group', 'Ascena Retail Group', 'United Rentals', 'Caseys General Stores', 'Graybar Electric', 'Avery Dennison', 'MasTec', 'CMS Energy', 'HD Supply Holdings', 'Raymond James Financial', 'NCR', 'Hanesbrands', 'Asbury Automotive Group', 'Citizens Financial Group', 'Packaging Corp. of America', 'Alleghany', 'Apache', 'Dillards', 'Assurant', 'Franklin Resources', 'Owens Corning', 'Motorola Solutions', 'NVR', 'Rockwell Automation', 'TreeHouse Foods', 'Wynn Resorts', 'Olin', 'American Axle & Manufacturing', 'Old Republic International', 'Chemours', 'iHeartMedia', 'Ameren', 'Arthur J. Gallagher', 'Celanese', 'Sealed Air', 'UGI', 'Realogy Holdings', 'Burlington Stores', 'Regions Financial', 'AK Steel Holding', 'Securian Financial Group', 'S& P Global', 'Markel', 'TravelCenters of America', 'Conduent', 'M& T Bank Corp.', 'Clorox', 'AmTrust Financial Services', 'KKR', 'Ulta Beauty', 'Yum Brands', 'Regeneron Pharmaceuticals', 'Windstream Holdings', 'Magellan Health', 'Western & Southern Financial', 'Intercontinental Exchange', 'Ingredion', 'Wyndham Destinations', 'Toll Brothers', 'Seaboard', 'Booz Allen Hamilton', 'First American Financial', 'Cincinnati Financial', 'Avon Products', 'Northern Trust', 'Fiserv', 'Harley-Davidson', 'Cheniere Energy', 'Patterson', 'Peabody Energy', 'ON Semiconductor', 'Simon Property Group', 'Western Union', 'NetApp', 'Polaris Industries', 'Pioneer Natural Resources', 'ABM Industries', 'Vistra Energy', 'Cintas', 'Hess', 'Host Hotels & Resorts', 'Kelly Services', 'Genesis Healthcare', 'Michaels Cos.', 'Advanced Micro Devices', 'Zoetis', 'Williams-Sonoma', 'Fortune Brands Home & Security', 'Big Lots', 'Robert Half International', 'Post Holdings', 'Hasbro', 'Hanover Insurance Group', 'Navient', 'Intuit', 'Domtar', 'Marathon Oil', 'Cerner', 'Analog Devices', 'Telephone & Data Systems', 'Essendant', 'Sonoco Products', 'Juniper Networks', 'Commercial Metals', 'CSRA', 'Under Armour', 'RPM International', 'Total System Services', 'Levi Strauss', 'Brunswick', 'YRC Worldwide', 'Mattel', 'FM Global', 'NiSource', 'Caesars Entertainment', 'Electronic Arts', 'Dynegy', 'McCormick', 'T. 
Rowe Price', 'Orbital ATK', 'Tutor Perini', 'Brookdale Senior Living', 'Huntington Bancshares', 'Wayfair', 'Rush Enterprises', 'Xylem', 'Neiman Marcus Group', 'Hyatt Hotels', 'Sprouts Farmers Market', 'Diebold Nixdorf', 'Roper Technologies', 'Smart & Final Stores', 'CommScope Holding', 'Tapestry', 'Diplomat Pharmacy', 'Chipotle Mexican Grill', 'Agilent Technologies', 'Science Applications International', 'MDU Resources Group', 'Select Medical Holdings', 'Boise Cascade', 'National General Holdings', 'SCANA', 'Graphic Packaging Holding', 'Fastenal', 'Schneider National', 'Laureate Education', 'Beacon Roofing Supply', 'KB Home', 'Equinix', 'Terex', 'Crown Castle International', 'CACI International', 'Watsco', 'Coca-Cola Bottling', 'Welltower', 'ADT', 'Ametek', 'CNO Financial Group', 'Camping World Holdings', 'LPL Financial Holdings', 'Noble Energy', 'Bloomin Brands', 'Moodys', 'Symantec', 'Amkor Technology', 'Skechers U.S.A.', 'KBR', 'Tiffany', 'Torchmark', 'Broadridge Financial Solutions', 'Quad/Graphics', 'CF Industries Holdings', 'Carlisle', 'Silgan Holdings', 'Bemis', 'CA', 'Hub Group', 'Worldpay', 'Ingles Markets', 'Snap-on', 'Dentsply Sirona', 'Calumet Specialty Products', 'Global Payments', 'Encompass Health', 'Martin Marietta Materials', 'Nasdaq', 'Leggett & Platt', 'Universal Forest Products', 'Sally Beauty Holdings', 'Flowers Foods', 'Barnes & Noble', 'American Equity Investment Life', 'Vulcan Materials', 'Taylor Morrison Home', 'Westinghouse Air Brake', 'Crestwood Equity Partners', 'Iron Mountain', 'Lennox International', 'General Cable', 'American Eagle Outfitters', 'Church & Dwight', 'Platform Specialty Products', 'JELD-WEN Holding', 'OneMain Holdings', 'Colfax', 'Zebra Technologies', 'Andersons', 'TD Ameritrade Holding', 'Carlyle Group', 'Hubbell', 'Trinity Industries', 'Darling Ingredients', 'Flowserve', 'Antero Resources', 'Skyworks Solutions', 'Landstar System', 'Buckeye Partners', 'MRC Global', 'CME Group', 'Greif', 'Nexeo Solutions', 'Cooper-Standard Holdings', 'Urban Outfitters', 'LSC Communications', 'Sabre', 'Green Plains', 'Hexion', 'Stericycle', 'Warner Music Group', 'Ventas', 'ScanSource', 'Pinnacle West Capital', 'Scripps Networks Interactive', 'Alexion Pharmaceuticals', 'Pitney Bowes', 'CIT Group', 'Country Financial', 'CUNA Mutual Group', 'Triumph Group', 'TransDigm Group', 'Allegheny Technologies', 'Resolute Forest Products', 'Acuity Brands', 'Abercrombie & Fitch', 'KLA-Tencor', 'Weis Markets', 'Puget Energy', 'Mednax', 'Kar Auction Services', 'PolyOne', 'FMC', 'Edwards Lifesciences', 'Microchip Technology', 'Amerco', 'Mercury General', 'American National Insurance', 'Carters', 'International Flavors & Fragrances', 'Aarons', 'Alliant Energy', 'EQT', 'Monster Beverage', 'BMC Stock Holdings', 'Ryerson Holding', 'Equifax', 'Regal Beloit', 'Old Dominion Freight Line', 'American Water Works', 'BGC Partners', 'Brinks', 'Meritor', 'Sentry Insurance Group', 'Sanderson Farms', 'KapStone Paper & Packaging', 'Gartner', 'IAC/InterActiveCorp', 'Tailored Brands', 'WABCO Holdings', 'Insperity', 'Comerica', 'TriNet Group', 'Avaya Holdings', 'Ashland Global Holdings', 'Meritage Homes', 'SkyWest', 'USG', 'Southwestern Energy', 'Keysight Technologies', 'Regal Entertainment Group', 'Mutual of America Life Insurance', 'Paychex', 'Brinker International', 'Penn National Gaming', 'Gannett', 'Visteon', 'Pinnacle Foods', 'Intuitive Surgical', 'Continental Resources', 'Service Corp. 
International', 'Scientific Games', 'Albemarle', 'Atmos Energy', 'Hologic', 'H& R Block', 'Qorvo', 'Steelcase', 'Univision Communications', 'Worthington Industries', 'Timken', 'A.O. Smith', 'PriceSmart', 'Stifel Financial', 'Brown-Forman', 'Cinemark Holdings', 'Granite Construction', 'Dycom Industries', 'Clean Harbors', 'First Solar', 'Scotts Miracle-Gro', 'Cracker Barrel Old Country Store', 'Triple-S Management', 'First Republic Bank', 'ServiceMaster Global Holdings', 'PC Connection', 'Genesco', 'Medical Mutual of Ohio', 'MSC Industrial Direct', 'Legg Mason', 'Hyster-Yale Materials Handling', 'Apollo Global Management', 'Citrix Systems', 'Acadia Healthcare', 'Varian Medical Systems', 'Groupon', 'Aleris', 'Sprague Resources', 'Cooper Tire & Rubber', 'Hain Celestial Group', 'Penn Mutual Life Insurance', 'Colony NorthStar', 'ArcBest', 'Presidio', 'TRI Pointe Group', 'Annaly Capital Management', 'G-III Apparel Group', 'AMC Networks', 'Enable Midstream Partners', 'Ciena', 'DSW', 'Convergys', 'Park Hotels & Resorts', 'Pool', 'Fossil Group', 'Dominos Pizza', 'Crane', 'Caleres', 'Tempur Sealy International', 'Tetra Tech', 'Illumina', 'Valmont Industries', 'Hill-Rom Holdings', 'Unisys', 'Zions Bancorp.', 'Sinclair Broadcast Group', 'Louisiana-Pacific', 'Mettler-Toledo International', 'Synopsys', 'Kemper', 'Cabot', 'Great Plains Energy', 'Rent-A-Center', 'Hawaiian Holdings', 'Revlon', 'Syneos Health', 'Public Storage', 'TTM Technologies', 'Vectren', 'Trimble', 'NOW', 'Spirit Airlines', 'ASGN', 'Lincoln Electric Holdings', 'Prologis', 'Range Resources', 'Teledyne Technologies', 'Vishay Intertechnology', 'Boston Properties', 'Applied Industrial Technologies', 'Graham Holdings', 'Amica Mutual Insurance', 'Concho Resources', 'ITT', 'Kansas City Southern', 'MDC Holdings', 'Evergy', 'Pinnacle Entertainment', 'Hawaiian Electric Industries', 'TEGNA', 'Southwest Gas Holdings', 'Vista Outdoor', 'Bon-Ton Stores', 'Super Micro Computer', 'Plexus', 'TrueBlue', 'Magellan Midstream Partners', 'Toro', 'Akamai Technologies', 'Moog', 'Vertex Pharmaceuticals', 'Equity Residential', 'Selective Insurance Group', 'AptarGroup', 'Benchmark Electronics', 'Columbia Sportswear', 'A. Schulman', 'Verso', 'Digital Realty Trust', 'GNC Holdings', 'E*Trade Financial', 'Hovnanian Enterprises', 'Maximus', 'Twitter', 'Par Pacific Holdings', 'Parexel International', 'RH', 'Nexstar Media Group', 'Knight-Swift Transportation Holdings', 'Red Hat', 'Belden', 'Boyd Gaming', 'Primoris Services', 'Gardner Denver', 'Donaldson', 'Party City Holdco', 'J.Crew Group', 'EnerSys', 'Guess', 'Patterson-UTI Energy', 'WGL Holdings', 'Wolverine World Wide', 'Xilinx', 'Vornado Realty Trust', 'Middleby', 'MPM Holdings', 'Cleveland-Cliffs', 'GGP', 'Cypress Semiconductor', 'Arch Coal', 'GMS', 'Waters', 'H.B. 
Fuller', 'Affiliated Managers Group', 'PerkinElmer', 'Edgewell Personal Care', 'Maxim Integrated Products', 'Knights of Columbus', 'IDEX', 'DST Systems', 'Chicos FAS', 'Nu Skin Enterprises', 'Herman Miller', 'NLV Financial', 'Curtiss-Wright', 'New Jersey Resources', 'REV Group', 'Mueller Industries', 'GEO Group', 'Allison Transmission Holdings', 'OGE Energy', 'Cheesecake Factory', 'PRA Health Sciences', 'Tupperware Brands', 'Euronet Worldwide', 'FLEETCOR Technologies', 'Nationstar Mortgage Holdings', 'GoDaddy', 'Blackhawk Network Holdings', 'Cboe Global Markets', 'Snyders-Lance', 'Murphy Oil', 'CDK Global', 'Texas Roadhouse', 'Kirby', 'Square', 'Genesee & Wyoming', 'Zayo Group Holdings', 'NewMarket', '99 Cents Only Stores', 'PCM', 'Federated Mutual Insurance', 'HNI', 'Hospitality Properties Trust', 'Greenbrier Cos.', 'Bio-Rad Laboratories', 'AvalonBay Communities', 'Renewable Energy Group', 'Atlas Air Worldwide Holdings', 'Teradata', 'LCI Industries', 'Teleflex', 'Verisk Analytics', 'Popular', 'Workday', 'Cooper Cos.', 'Express', 'Teradyne', 'Werner Enterprises', 'Oaktree Capital Group', 'Woodward', 'F5 Networks', 'Valvoline', 'Roadrunner Transportation Systems', 'SemGroup', 'Catalent', 'Quorum Health', 'Universal', 'Nordson', 'ResMed', 'Tower International', 'Freds', 'Foundation Building Materials', 'Kennametal', 'Autodesk', 'Ply Gem Holdings', 'Central Garden & Pet', 'Matson', 'EchoStar', 'Genesis Energy', 'SVB Financial Group', 'Itron', 'Portland General Electric', 'California Resources', 'Esterline Technologies', 'Delta Tucker Holdings', 'AMN Healthcare Services', 'Griffon', 'Valhi', 'Hexcel', 'IDEXX Laboratories', 'Deluxe', 'M/I Homes', 'Kraton', 'Stewart Information Services', 'Marriott Vacations Worldwide', 'SPX FLOW', 'ACCO Brands', 'Echo Global Logistics', 'Cadence Design Systems', 'Nuance Communications', 'Finish Line', 'TransUnion', 'ServiceNow', 'Summit Materials', 'Engility Holdings', 'Ferrellgas Partners', 'Interactive Brokers Group', 'Stepan', 'Oceaneering International', 'Cimarex Energy', 'Rexnord', 'Beazer Homes USA', 'MKS Instruments', 'Vail Resorts', 'Ohio National Mutual', 'TopBuild', 'Brown & Brown', 'Aerojet Rocketdyne Holdings', 'Barnes & Noble Education', 'Superior Energy Services', 'VeriFone Systems', 'Childrens Place', 'Tribune Media', 'Healthcare Services Group', 'SiteOne Landscape Supply', 'Charles River Laboratories Intl', 'CoreLogic', 'Ensign Group', 'HCP'], 'Sector': ['Retailing', 'Energy', 'Financials', 'Technology', 'Health Care', 'Wholesalers', 'Health Care', 'Retailing', 'Telecommunications', 'Motor Vehicles & Parts', 'Motor Vehicles & Parts', 'Wholesalers', 'Energy', 'Wholesalers', 'Retailing', 'Telecommunications', 'Food & Drug Stores', 'Industrials', 'Food & Drug Stores', 'Financials', 'Financials', 'Technology', 'Retailing', 'Financials', 'Health Care', 'Financials', 'Aerospace & Defense', 'Energy', 'Health Care', 'Technology', 'Energy', 'Financials', 'Telecommunications', 'Technology', 'Technology', 'Financials', 'Health Care', 'Financials', 'Retailing', 'Retailing', 'Energy', 'Household Products', 'Financials', 'Transportation', 'Food, Beverages & Tobacco', 'Technology', 'Chemicals', 'Food, Beverages & Tobacco', 'Health Care', 'Transportation', 'Aerospace & Defense', 'Financials', 'Food & Drug Stores', 'Wholesalers', 'Media', 'Health Care', 'Health Care', 'Technology', 'Aerospace & Defense', 'Financials', 'Health Care', 'Technology', 'Health Care', 'Energy', 'Industrials', 'Financials', 'Financials', 'Financials', 'Financials', 'Financials', 
'Transportation', 'Retailing', 'Health Care', 'Telecommunications', 'Transportation', 'Technology', 'Industrials', 'Health Care', 'Financials', 'Food, Beverages & Tobacco', 'Transportation', 'Technology', 'Wholesalers', 'Financials', 'Retailing', 'Financials', 'Food, Beverages & Tobacco', 'Food & Drug Stores', 'Apparel', 'Energy', 'Energy', 'Energy', 'Financials', 'Food & Drug Stores', 'Energy', 'Food, Beverages & Tobacco', 'Industrials', 'Media', 'Aerospace & Defense', 'Financials', 'Financials', 'Industrials', 'Financials', 'Financials', 'Energy', 'Financials', 'Technology', 'Food, Beverages & Tobacco', 'Media', 'Health Care', 'Health Care', 'Financials', 'Wholesalers', 'Food, Beverages & Tobacco', 'Energy', 'Health Care', 'Food, Beverages & Tobacco', 'Aerospace & Defense', 'Aerospace & Defense', 'Retailing', 'Wholesalers', 'Financials', 'Retailing', 'Materials', 'Energy', 'Energy', 'Hotels, Restaurants & Leisure', 'Wholesalers', 'Health Care', 'Health Care', 'Hotels, Restaurants & Leisure', 'Hotels, Restaurants & Leisure', 'Technology', 'Retailing', 'Energy', 'Financials', 'Financials', 'Retailing', 'Retailing', 'Industrials', 'Transportation', 'Transportation', 'Business Services', 'Technology', 'Health Care', 'Energy', 'Health Care', 'Motor Vehicles & Parts', 'Industrials', 'Technology', 'Materials', 'Health Care', 'Engineering & Construction', 'Food, Beverages & Tobacco', 'Industrials', 'Financials', 'Retailing', 'Technology', 'Technology', 'Health Care', 'Business Services', 'Health Care', 'Household Products', 'Engineering & Construction', 'Financials', 'Telecommunications', 'Energy', 'Energy', 'Wholesalers', 'Health Care', 'Wholesalers', 'Retailing', 'Financials', 'Retailing', 'Financials', 'Energy', 'Wholesalers', 'Industrials', 'Health Care', 'Food & Drug Stores', 'Retailing', 'Food, Beverages & Tobacco', 'Retailing', 'Household Products', 'Energy', 'Transportation', 'Motor Vehicles & Parts', 'Business Services', 'Technology', 'Chemicals', 'Chemicals', 'Technology', 'Transportation', 'Materials', 'Technology', 'Household Products', 'Media', 'Health Care', 'Chemicals', 'Business Services', 'Technology', 'Business Services', 'Telecommunications', 'Industrials', 'Financials', 'Energy', 'Financials', 'Aerospace & Defense', 'Retailing', 'Financials', 'Engineering & Construction', 'Financials', 'Energy', 'Energy', 'Chemicals', 'Food, Beverages & Tobacco', 'Financials', 'Energy', 'Energy', 'Energy', 'Media', 'Business Services', 'Energy', 'Health Care', 'Aerospace & Defense', 'Food, Beverages & Tobacco', 'Hotels, Restaurants & Leisure', 'Household Products', 'Technology', 'Engineering & Construction', 'Retailing', 'Energy', 'Energy', 'Financials', 'Retailing', 'Business Services', 'Financials', 'Wholesalers', 'Financials', 'Health Care', 'Financials', 'Apparel', 'Business Services', 'Energy', 'Health Care', 'Materials', 'Wholesalers', 'Retailing', 'Energy', 'Financials', 'Health Care', 'Financials', 'Financials', 'Business Services', 'Energy', 'Industrials', 'Energy', 'Household Products', 'Financials', 'Motor Vehicles & Parts', 'Technology', 'Materials', 'Financials', 'Chemicals', 'Transportation', 'Energy', 'Financials', 'Health Care', 'Energy', 'Energy', 'Energy', 'Retailing', 'Retailing', 'Energy', 'Food, Beverages & Tobacco', 'Aerospace & Defense', 'Materials', 'Retailing', 'Retailing', 'Hotels, Restaurants & Leisure', 'Retailing', 'Chemicals', 'Health Care', 'Transportation', 'Technology', 'Health Care', 'Wholesalers', 'Retailing', 'Motor Vehicles & Parts', 'Media', 'Technology', 
'Technology', 'Industrials', 'Retailing', 'Retailing', 'Business Services', 'Engineering & Construction', 'Retailing', 'Financials', 'Wholesalers', 'Motor Vehicles & Parts', 'Financials', 'Financials', 'Health Care', 'Materials', 'Technology', 'Financials', 'Energy', 'Technology', 'Chemicals', 'Financials', 'Materials', 'Financials', 'Energy', 'Household Products', 'Engineering & Construction', 'Retailing', 'Wholesalers', 'Wholesalers', 'Motor Vehicles & Parts', 'Food, Beverages & Tobacco', 'Retailing', 'Food, Beverages & Tobacco', 'Hotels, Restaurants & Leisure', 'Telecommunications', 'Business Services', 'Energy', 'Health Care', 'Retailing', 'Financials', 'Wholesalers', 'Apparel', 'Retailing', 'Energy', 'Retailing', 'Energy', 'Financials', 'Materials', 'Engineering & Construction', 'Retailing', 'Engineering & Construction', 'Industrials', 'Financials', 'Energy', 'Chemicals', 'Wholesalers', 'Industrials', 'Financials', 'Wholesalers', 'Media', 'Wholesalers', 'Chemicals', 'Energy', 'Technology', 'Transportation', 'Financials', 'Wholesalers', 'Food, Beverages & Tobacco', 'Business Services', 'Industrials', 'Health Care', 'Food, Beverages & Tobacco', 'Retailing', 'Energy', 'Business Services', 'Financials', 'Health Care', 'Engineering & Construction', 'Financials', 'Wholesalers', 'Household Products', 'Energy', 'Household Products', 'Technology', 'Financials', 'Financials', 'Media', 'Financials', 'Food, Beverages & Tobacco', 'Energy', 'Aerospace & Defense', 'Chemicals', 'Food, Beverages & Tobacco', 'Energy', 'Energy', 'Food, Beverages & Tobacco', 'Transportation', 'Energy', 'Technology', 'Health Care', 'Retailing', 'Motor Vehicles & Parts', 'Motor Vehicles & Parts', 'Materials', 'Transportation', 'Hotels, Restaurants & Leisure', 'Hotels, Restaurants & Leisure', 'Financials', 'Materials', 'Materials', 'Technology', 'Transportation', 'Technology', 'Materials', 'Aerospace & Defense', 'Media', 'Aerospace & Defense', 'Transportation', 'Media', 'Materials', 'Technology', 'Financials', 'Financials', 'Industrials', 'Aerospace & Defense', 'Health Care', 'Technology', 'Food, Beverages & Tobacco', 'Financials', 'Industrials', 'Apparel', 'Household Products', 'Retailing', 'Business Services', 'Retailing', 'Wholesalers', 'Materials', 'Engineering & Construction', 'Energy', 'Wholesalers', 'Financials', 'Technology', 'Apparel', 'Retailing', 'Financials', 'Materials', 'Financials', 'Energy', 'Retailing', 'Financials', 'Financials', 'Materials', 'Technology', 'Engineering & Construction', 'Industrials', 'Food, Beverages & Tobacco', 'Hotels, Restaurants & Leisure', 'Chemicals', 'Motor Vehicles & Parts', 'Financials', 'Chemicals', 'Media', 'Energy', 'Financials', 'Chemicals', 'Materials', 'Energy', 'Financials', 'Retailing', 'Financials', 'Materials', 'Financials', 'Business Services', 'Financials', 'Retailing', 'Business Services', 'Financials', 'Household Products', 'Financials', 'Financials', 'Retailing', 'Hotels, Restaurants & Leisure', 'Health Care', 'Telecommunications', 'Health Care', 'Financials', 'Financials', 'Food, Beverages & Tobacco', 'Hotels, Restaurants & Leisure', 'Engineering & Construction', 'Food, Beverages & Tobacco', 'Technology', 'Financials', 'Financials', 'Household Products', 'Financials', 'Business Services', 'Transportation', 'Energy', 'Wholesalers', 'Energy', 'Technology', 'Financials', 'Business Services', 'Technology', 'Transportation', 'Energy', 'Business Services', 'Energy', 'Business Services', 'Energy', 'Financials', 'Business Services', 'Health Care', 'Retailing', 
'Technology', 'Health Care', 'Retailing', 'Household Products', 'Retailing', 'Business Services', 'Food, Beverages & Tobacco', 'Household Products', 'Financials', 'Financials', 'Technology', 'Materials', 'Energy', 'Health Care', 'Technology', 'Telecommunications', 'Wholesalers', 'Materials', 'Technology', 'Materials', 'Technology', 'Apparel', 'Chemicals', 'Business Services', 'Apparel', 'Transportation', 'Transportation', 'Household Products', 'Financials', 'Energy', 'Hotels, Restaurants & Leisure', 'Technology', 'Energy', 'Food, Beverages & Tobacco', 'Financials', 'Aerospace & Defense', 'Engineering & Construction', 'Health Care', 'Financials', 'Technology', 'Retailing', 'Industrials', 'Retailing', 'Hotels, Restaurants & Leisure', 'Food & Drug Stores', 'Technology', 'Technology', 'Food & Drug Stores', 'Technology', 'Apparel', 'Health Care', 'Hotels, Restaurants & Leisure', 'Technology', 'Technology', 'Energy', 'Health Care', 'Wholesalers', 'Financials', 'Energy', 'Materials', 'Wholesalers', 'Transportation', 'Business Services', 'Wholesalers', 'Engineering & Construction', 'Financials', 'Industrials', 'Financials', 'Technology', 'Wholesalers', 'Food, Beverages & Tobacco', 'Financials', 'Business Services', 'Technology', 'Financials', 'Retailing', 'Financials', 'Energy', 'Hotels, Restaurants & Leisure', 'Business Services', 'Technology', 'Technology', 'Apparel', 'Engineering & Construction', 'Retailing', 'Financials', 'Business Services', 'Media', 'Chemicals', 'Materials', 'Materials', 'Materials', 'Technology', 'Transportation', 'Business Services', 'Food & Drug Stores', 'Industrials', 'Health Care', 'Energy', 'Business Services', 'Health Care', 'Materials', 'Financials', 'Household Products', 'Materials', 'Retailing', 'Food, Beverages & Tobacco', 'Retailing', 'Financials', 'Materials', 'Engineering & Construction', 'Industrials', 'Energy', 'Business Services', 'Industrials', 'Industrials', 'Retailing', 'Household Products', 'Chemicals', 'Materials', 'Financials', 'Industrials', 'Industrials', 'Food, Beverages & Tobacco', 'Financials', 'Financials', 'Industrials', 'Transportation', 'Food, Beverages & Tobacco', 'Industrials', 'Energy', 'Technology', 'Transportation', 'Energy', 'Energy', 'Financials', 'Materials', 'Wholesalers', 'Motor Vehicles & Parts', 'Retailing', 'Media', 'Technology', 'Energy', 'Chemicals', 'Business Services', 'Media', 'Financials', 'Wholesalers', 'Energy', 'Media', 'Health Care', 'Technology', 'Financials', 'Financials', 'Financials', 'Aerospace & Defense', 'Aerospace & Defense', 'Materials', 'Materials', 'Industrials', 'Retailing', 'Technology', 'Food & Drug Stores', 'Energy', 'Health Care', 'Wholesalers', 'Chemicals', 'Chemicals', 'Health Care', 'Technology', 'Transportation', 'Financials', 'Financials', 'Apparel', 'Chemicals', 'Retailing', 'Energy', 'Energy', 'Food, Beverages & Tobacco', 'Wholesalers', 'Materials', 'Business Services', 'Industrials', 'Transportation', 'Energy', 'Financials', 'Business Services', 'Business Services', 'Financials', 'Food, Beverages & Tobacco', 'Materials', 'Technology', 'Technology', 'Retailing', 'Motor Vehicles & Parts', 'Business Services', 'Financials', 'Business Services', 'Technology', 'Chemicals', 'Engineering & Construction', 'Transportation', 'Materials', 'Energy', 'Technology', 'Media', 'Financials', 'Business Services', 'Hotels, Restaurants & Leisure', 'Hotels, Restaurants & Leisure', 'Media', 'Motor Vehicles & Parts', 'Food, Beverages & Tobacco', 'Health Care', 'Energy', 'Business Services', 'Hotels, Restaurants & 
Leisure', 'Chemicals', 'Energy', 'Health Care', 'Financials', 'Technology', 'Household Products', 'Media', 'Materials', 'Industrials', 'Industrials', 'Retailing', 'Financials', 'Food, Beverages & Tobacco', 'Media', 'Engineering & Construction', 'Engineering & Construction', 'Business Services', 'Energy', 'Chemicals', 'Hotels, Restaurants & Leisure', 'Health Care', 'Financials', 'Business Services', 'Retailing', 'Retailing', 'Financials', 'Wholesalers', 'Financials', 'Industrials', 'Financials', 'Technology', 'Health Care', 'Health Care', 'Technology', 'Materials', 'Wholesalers', 'Motor Vehicles & Parts', 'Food, Beverages & Tobacco', 'Financials', 'Financials', 'Transportation', 'Technology', 'Engineering & Construction', 'Financials', 'Apparel', 'Media', 'Energy', 'Technology', 'Retailing', 'Business Services', 'Financials', 'Wholesalers', 'Apparel', 'Hotels, Restaurants & Leisure', 'Industrials', 'Retailing', 'Household Products', 'Engineering & Construction', 'Technology', 'Materials', 'Health Care', 'Technology', 'Financials', 'Media', 'Materials', 'Technology', 'Technology', 'Financials', 'Chemicals', 'Energy', 'Retailing', 'Transportation', 'Household Products', 'Health Care', 'Financials', 'Technology', 'Energy', 'Technology', 'Wholesalers', 'Transportation', 'Business Services', 'Industrials', 'Financials', 'Energy', 'Aerospace & Defense', 'Technology', 'Financials', 'Wholesalers', 'Business Services', 'Financials', 'Energy', 'Industrials', 'Transportation', 'Engineering & Construction', 'Energy', 'Hotels, Restaurants & Leisure', 'Energy', 'Media', 'Energy', 'Household Products', 'Retailing', 'Technology', 'Technology', 'Business Services', 'Energy', 'Industrials', 'Technology', 'Aerospace & Defense', 'Health Care', 'Financials', 'Financials', 'Materials', 'Technology', 'Apparel', 'Chemicals', 'Materials', 'Financials', 'Food & Drug Stores', 'Financials', 'Engineering & Construction', 'Technology', 'Technology', 'Energy', 'Health Care', 'Retailing', 'Media', 'Transportation', 'Technology', 'Industrials', 'Hotels, Restaurants & Leisure', 'Engineering & Construction', 'Industrials', 'Industrials', 'Retailing', 'Retailing', 'Industrials', 'Retailing', 'Energy', 'Energy', 'Apparel', 'Technology', 'Financials', 'Industrials', 'Chemicals', 'Energy', 'Financials', 'Technology', 'Energy', 'Wholesalers', 'Technology', 'Chemicals', 'Financials', 'Technology', 'Household Products', 'Technology', 'Financials', 'Industrials', 'Business Services', 'Retailing', 'Household Products', 'Household Products', 'Financials', 'Aerospace & Defense', 'Energy', 'Motor Vehicles & Parts', 'Industrials', 'Business Services', 'Motor Vehicles & Parts', 'Energy', 'Hotels, Restaurants & Leisure', 'Technology', 'Household Products', 'Business Services', 'Business Services', 'Financials', 'Technology', 'Business Services', 'Financials', 'Food, Beverages & Tobacco', 'Energy', 'Technology', 'Hotels, Restaurants & Leisure', 'Transportation', 'Business Services', 'Transportation', 'Telecommunications', 'Chemicals', 'Retailing', 'Wholesalers', 'Financials', 'Household Products', 'Financials', 'Transportation', 'Health Care', 'Financials', 'Energy', 'Transportation', 'Technology', 'Motor Vehicles & Parts', 'Health Care', 'Business Services', 'Financials', 'Technology', 'Health Care', 'Retailing', 'Technology', 'Transportation', 'Financials', 'Aerospace & Defense', 'Technology', 'Chemicals', 'Transportation', 'Energy', 'Health Care', 'Health Care', 'Food, Beverages & Tobacco', 'Industrials', 'Health Care', 'Motor Vehicles & 
Parts', 'Food & Drug Stores', 'Wholesalers', 'Industrials', 'Technology', 'Materials', 'Household Products', 'Transportation', 'Technology', 'Energy', 'Financials', 'Industrials', 'Energy', 'Energy', 'Aerospace & Defense', 'Aerospace & Defense', 'Health Care', 'Materials', 'Chemicals', 'Aerospace & Defense', 'Health Care', 'Media', 'Engineering & Construction', 'Chemicals', 'Financials', 'Hotels, Restaurants & Leisure', 'Industrials', 'Household Products', 'Transportation', 'Technology', 'Technology', 'Retailing', 'Business Services', 'Technology', 'Materials', 'Aerospace & Defense', 'Energy', 'Financials', 'Chemicals', 'Energy', 'Energy', 'Industrials', 'Engineering & Construction', 'Technology', 'Hotels, Restaurants & Leisure', 'Financials', 'Engineering & Construction', 'Financials', 'Aerospace & Defense', 'Retailing', 'Energy', 'Technology', 'Retailing', 'Media', 'Health Care', 'Wholesalers', 'Health Care', 'Business Services', 'Health Care', 'Financials'], 'Industry': ['General Merchandisers', 'Petroleum Refining', 'Insurance: Property and Casualty (Stock)', 'Computers, Office Equipment', 'Health Care: Insurance and Managed Care', 'Wholesalers: Health Care', 'Health Care: Pharmacy and Other Services', 'Internet Services and Retailing', 'Telecommunications', 'Motor Vehicles and Parts', 'Motor Vehicles and Parts', 'Wholesalers: Health Care', 'Petroleum Refining', 'Wholesalers: Health Care', 'General Merchandisers', 'Telecommunications', 'Food and Drug Stores', 'Industrial Machinery', 'Food and Drug Stores', 'Commercial Banks', 'Diversified Financials', 'Internet Services and Retailing', 'Specialty Retailers: Other', 'Commercial Banks', 'Health Care: Pharmacy and Other Services', 'Commercial Banks', 'Aerospace and Defense', 'Petroleum Refining', 'Health Care: Insurance and Managed Care', 'Computer Software', 'Petroleum Refining', 'Commercial Banks', 'Telecommunications', 'Information Technology Services', 'Computers, Office Equipment', 'Insurance: Property and Casualty (Mutual)', 'Pharmaceuticals', 'Diversified Financials', 'General Merchandisers', 'Specialty Retailers: Other', 'Petroleum Refining', 'Household and Personal Products', 'Insurance: Life, Health (stock)', 'Mail, Package, and Freight Delivery', 'Food Consumer Products', 'Semiconductors and Other Electronic Components', 'Chemicals', 'Food Production', 'Health Care: Insurance and Managed Care', 'Mail, Package, and Freight Delivery', 'Aerospace and Defense', 'Insurance: Life, Health (stock)', 'Food and Drug Stores', 'Wholesalers: Food and Grocery', 'Entertainment', 'Health Care: Insurance and Managed Care', 'Pharmaceuticals', 'Computers, Office Equipment', 'Aerospace and Defense', 'Insurance: Property and Casualty (Stock)', 'Health Care: Insurance and Managed Care', 'Network and Other Communications Equipment', 'Health Care: Medical Facilities', 'Pipelines', 'Construction and Farm Machinery', 'Insurance: Property and Casualty (Mutual)', 'Commercial Banks', 'Insurance: Property and Casualty (Stock)', 'Insurance: Life, Health (Mutual)', 'Commercial Banks', 'Airlines', 'Specialty Retailers: Other', 'Health Care: Insurance and Managed Care', 'Telecommunications', 'Airlines', 'Internet Services and Retailing', 'Electronics, Electrical Equip.', 'Pharmaceuticals', 'Insurance: Property and Casualty (Stock)', 'Food Production', 'Airlines', 'Computer Software', 'Wholesalers: Electronics and Office Equipment', 'Insurance: Life, Health (Mutual)', 'Specialty Retailers: Apparel', 'Diversified Financials', 'Beverages', 'Food and Drug Stores', 
'Apparel', 'Petroleum Refining', 'Energy', 'Utilities: Gas and Electric', 'Insurance: Life, Health (Mutual)', 'Food and Drug Stores', 'Mining, Crude-Oil Production', 'Food Production', 'Miscellaneous', 'Entertainment', 'Aerospace and Defense', 'Insurance: Property and Casualty (Stock)', 'Commercial Banks', 'Construction and Farm Machinery', 'Diversified Financials', 'Insurance: Life, Health (Mutual)', 'Pipelines', 'Insurance: Property and Casualty (Stock)', 'Computers, Office Equipment', 'Tobacco', 'Entertainment', 'Pharmaceuticals', 'Medical Products and Equipment', 'Insurance: Property and Casualty (Stock)', 'Wholesalers: Electronics and Office Equipment', 'Food Consumer Products', 'Pipelines', 'Pharmaceuticals', 'Food Consumer Products', 'Aerospace and Defense', 'Aerospace and Defense', 'General Merchandisers', 'Wholesalers: Food and Grocery', 'Commercial Banks', 'Specialty Retailers: Other', 'Packaging, Containers', 'Utilities: Gas and Electric', 'Utilities: Gas and Electric', 'Hotels, Casinos, Resorts', 'Wholesalers: Electronics and Office Equipment', 'Pharmaceuticals', 'Pharmaceuticals', 'Food Services', 'Food Services', 'Semiconductors and Other Electronic Components', 'Specialty Retailers: Other', 'Petroleum Refining', 'Diversified Financials', 'Insurance: Life, Health (stock)', 'Automotive Retailing, Services', 'Automotive Retailing, Services', 'Electronics, Electrical Equip.', 'Railroads', 'Airlines', 'Temporary Help', 'Scientific,Photographic and Control Equipment', 'Pharmaceuticals', 'Oil and Gas Equipment, Services', 'Health Care: Medical Facilities', 'Motor Vehicles and Parts', 'Industrial Machinery', 'Semiconductors and Other Electronic Components', 'Metals', 'Health Care: Insurance and Managed Care', 'Engineering, Construction', 'Tobacco', 'Construction and Farm Machinery', 'Insurance: Property and Casualty (Stock)', 'General Merchandisers', 'Computers, Office Equipment', 'Semiconductors and Other Electronic Components', 'Health Care: Medical Facilities', 'Financial Data Services', 'Medical Products and Equipment', 'Household and Personal Products', 'Engineering, Construction', 'Commercial Banks', 'Telecommunications', 'Utilities: Gas and Electric', 'Utilities: Gas and Electric', 'Wholesalers: Electronics and Office Equipment', 'Health Care: Insurance and Managed Care', 'Wholesalers: Food and Grocery', 'General Merchandisers', 'Diversified Financials', 'Automotive Retailing, Services', 'Commercial Banks', 'Mining, Crude-Oil Production', 'Wholesalers: Diversified', 'Industrial Machinery', 'Health Care: Medical Facilities', 'Food and Drug Stores', 'Specialty Retailers: Apparel', 'Food Consumer Products', 'General Merchandisers', 'Household and Personal Products', 'Utilities: Gas and Electric', 'Transportation and Logistics', 'Motor Vehicles and Parts', 'Advertising, marketing', 'Information Technology Services', 'Chemicals', 'Chemicals', 'Semiconductors and Other Electronic Components', 'Transportation and Logistics', 'Packaging, Containers', 'Information Technology Services', 'Home Equipment, Furnishings', 'Entertainment', 'Health Care: Pharmacy and Other Services', 'Chemicals', 'Diversified Outsourcing Services', 'Semiconductors and Other Electronic Components', 'Waste Management', 'Telecommunications', 'Industrial Machinery', 'Insurance: Life, Health (stock)', 'Petroleum Refining', 'Real estate', 'Aerospace and Defense', 'Specialty Retailers: Apparel', 'Insurance: Life, Health (stock)', 'Homebuilders', 'Diversified Financials', 'Mining, Crude-Oil Production', 'Utilities: 
Gas and Electric', 'Chemicals', 'Food Consumer Products', 'Insurance: Property and Casualty (Stock)', 'Pipelines', 'Utilities: Gas and Electric', 'Mining, Crude-Oil Production', 'Entertainment', 'Financial Data Services', 'Energy', 'Pharmaceuticals', 'Aerospace and Defense', 'Food Consumer Products', 'Hotels, Casinos, Resorts', 'Home Equipment, Furnishings', 'Internet Services and Retailing', 'Homebuilders', 'Specialty Retailers: Apparel', 'Utilities: Gas and Electric', 'Utilities: Gas and Electric', 'Insurance: Life, Health (stock)', 'General Merchandisers', 'Financial Data Services', 'Securities', 'Wholesalers: Health Care', 'Insurance: Life, Health (Mutual)', 'Medical Products and Equipment', 'Diversified Financials', 'Apparel', 'Diversified Outsourcing Services', 'Utilities: Gas and Electric', 'Pharmaceuticals', 'Metals', 'Wholesalers: Food and Grocery', 'Specialty Retailers: Other', 'Pipelines', 'Commercial Banks', 'Medical Products and Equipment', 'Diversified Financials', 'Insurance: Property and Casualty (Mutual)', 'Financial Data Services', 'Utilities: Gas and Electric', 'Industrial Machinery', 'Mining, Crude-Oil Production', 'Household and Personal Products', 'Commercial Banks', 'Motor Vehicles and Parts', 'Internet Services and Retailing', 'Metals', 'Commercial Banks', 'Chemicals', 'Railroads', 'Utilities: Gas and Electric', 'Insurance: Life, Health (stock)', 'Health Care: Medical Facilities', 'Energy', 'Mining, Crude-Oil Production', 'Utilities: Gas and Electric', 'Specialty Retailers: Other', 'Automotive Retailing, Services', 'Utilities: Gas and Electric', 'Beverages', 'Aerospace and Defense', 'Packaging, Containers', 'Specialty Retailers: Other', 'Specialty Retailers: Other', 'Hotels, Casinos, Resorts', 'Specialty Retailers: Other', 'Chemicals', 'Medical Products and Equipment', 'Railroads', 'Computer Software', 'Health Care: Pharmacy and Other Services', 'Wholesalers: Diversified', 'Internet Services and Retailing', 'Motor Vehicles and Parts', 'Entertainment', 'Computers, Office Equipment', 'Information Technology Services', 'Electronics, Electrical Equip.', 'Automotive Retailing, Services', 'Internet Services and Retailing', 'Waste Management', 'Engineering, Construction', 'Automotive Retailing, Services', 'Diversified Financials', 'Wholesalers: Diversified', 'Motor Vehicles and Parts', 'Insurance: Property and Casualty (Stock)', 'Commercial Banks', 'Health Care: Pharmacy and Other Services', 'Metals', 'Semiconductors and Other Electronic Components', 'Diversified Financials', 'Utilities: Gas and Electric', 'Internet Services and Retailing', 'Chemicals', 'Insurance: Property and Casualty (Stock)', 'Metals', 'Insurance: Life, Health (stock)', 'Mining, Crude-Oil Production', 'Home Equipment, Furnishings', 'Engineering, Construction', 'Specialty Retailers: Other', 'Wholesalers: Health Care', 'Wholesalers: Food and Grocery', 'Motor Vehicles and Parts', 'Food Consumer Products', 'Specialty Retailers: Other', 'Food Consumer Products', 'Hotels, Casinos, Resorts', 'Telecommunications', 'Financial Data Services', 'Utilities: Gas and Electric', 'Medical Products and Equipment', 'Specialty Retailers: Other', 'Securities', 'Wholesalers: Diversified', 'Apparel', 'Automotive Retailing, Services', 'Pipelines', 'Automotive Retailing, Services', 'Energy', 'Insurance: Life, Health (stock)', 'Packaging, Containers', 'Engineering, Construction', 'Specialty Retailers: Other', 'Homebuilders', 'Construction and Farm Machinery', 'Insurance: Life, Health (Mutual)', 'Pipelines', 'Chemicals', 
'Wholesalers: Diversified', 'Construction and Farm Machinery', 'Insurance: Life, Health (stock)', 'Wholesalers: Diversified', 'Publishing, Printing', 'Wholesalers: Food and Grocery', 'Chemicals', 'Energy', 'Semiconductors and Other Electronic Components', 'Airlines', 'Real estate', 'Wholesalers: Electronics and Office Equipment', 'Food Consumer Products', 'Advertising, marketing', 'Industrial Machinery', 'Medical Products and Equipment', 'Food Consumer Products', 'Specialty Retailers: Apparel', 'Utilities: Gas and Electric', 'Financial Data Services', 'Commercial Banks', 'Health Care: Pharmacy and Other Services', 'Engineering, Construction', 'Insurance: Property and Casualty (Stock)', 'Wholesalers: Diversified', 'Household and Personal Products', 'Utilities: Gas and Electric', 'Home Equipment, Furnishings', 'Information Technology Services', 'Insurance: Property and Casualty (Mutual)', 'Securities', 'Entertainment', 'Insurance: Property and Casualty (Mutual)', 'Food Consumer Products', 'Utilities: Gas and Electric', 'Aerospace and Defense', 'Chemicals', 'Food Consumer Products', 'Petroleum Refining', 'Mining, Crude-Oil Production', 'Beverages', 'Trucking, Truck Leasing', 'Oil and Gas Equipment, Services', 'Computer Software', 'Health Care: Medical Facilities', 'Specialty Retailers: Other', 'Motor Vehicles and Parts', 'Motor Vehicles and Parts', 'Forest and Paper Products', 'Trucking, Truck Leasing', 'Food Services', 'Food Services', 'Diversified Financials', 'Packaging, Containers', 'Building Materials, Glass', 'Entertainment', 'Airlines', 'Network and Other Communications Equipment', 'Miscellaneous', 'Aerospace and Defense', 'Publishing, Printing', 'Aerospace and Defense', 'Transportation and Logistics', 'Entertainment', 'Packaging, Containers', 'Semiconductors and Other Electronic Components', 'Commercial Banks', 'Insurance: Property and Casualty (Stock)', 'Construction and Farm Machinery', 'Aerospace and Defense', 'Health Care: Medical Facilities', 'Information Technology Services', 'Beverages', 'Real estate', 'Industrial Machinery', 'Apparel', 'Household and Personal Products', 'Specialty Retailers: Apparel', 'Miscellaneous', 'Specialty Retailers: Other', 'Wholesalers: Diversified', 'Packaging, Containers', 'Engineering, Construction', 'Utilities: Gas and Electric', 'Wholesalers: Diversified', 'Securities', 'Computers, Office Equipment', 'Apparel', 'Automotive Retailing, Services', 'Commercial Banks', 'Packaging, Containers', 'Insurance: Property and Casualty (Stock)', 'Mining, Crude-Oil Production', 'General Merchandisers', 'Insurance: Property and Casualty (Stock)', 'Securities', 'Building Materials, Glass', 'Network and Other Communications Equipment', 'Homebuilders', 'Electronics, Electrical Equip.', 'Food Consumer Products', 'Hotels, Casinos, Resorts', 'Chemicals', 'Motor Vehicles and Parts', 'Insurance: Property and Casualty (Stock)', 'Chemicals', 'Entertainment', 'Utilities: Gas and Electric', 'Diversified Financials', 'Chemicals', 'Packaging, Containers', 'Energy', 'Real estate', 'Specialty Retailers: Apparel', 'Commercial Banks', 'Metals', 'Insurance: Life, Health (stock)', 'Financial Data Services', 'Insurance: Property and Casualty (Stock)', 'Specialty Retailers: Other', 'Diversified Outsourcing Services', 'Commercial Banks', 'Household and Personal Products', 'Insurance: Property and Casualty (Stock)', 'Securities', 'Specialty Retailers: Other', 'Food Services', 'Pharmaceuticals', 'Telecommunications', 'Health Care: Insurance and Managed Care', 'Insurance: Life, Health 
(Mutual)', 'Securities', 'Food Production', 'Hotels, Casinos, Resorts', 'Homebuilders', 'Food Production', 'Information Technology Services', 'Insurance: Property and Casualty (Stock)', 'Insurance: Property and Casualty (Stock)', 'Household and Personal Products', 'Commercial Banks', 'Financial Data Services', 'Transportation Equipment', 'Energy', 'Wholesalers: Health Care', 'Mining, Crude-Oil Production', 'Semiconductors and Other Electronic Components', 'Real estate', 'Financial Data Services', 'Computers, Office Equipment', 'Transportation Equipment', 'Mining, Crude-Oil Production', 'Diversified Outsourcing Services', 'Energy', 'Diversified Outsourcing Services', 'Mining, Crude-Oil Production', 'Real estate', 'Temporary Help', 'Health Care: Medical Facilities', 'Specialty Retailers: Other', 'Semiconductors and Other Electronic Components', 'Pharmaceuticals', 'Specialty Retailers: Other', 'Home Equipment, Furnishings', 'Specialty Retailers: Other', 'Temporary Help', 'Food Consumer Products', 'Toys, Sporting Goods', 'Insurance: Property and Casualty (Stock)', 'Diversified Financials', 'Computer Software', 'Forest and Paper Products', 'Mining, Crude-Oil Production', 'Health Care: Pharmacy and Other Services', 'Semiconductors and Other Electronic Components', 'Telecommunications', 'Wholesalers: Electronics and Office Equipment', 'Packaging, Containers', 'Network and Other Communications Equipment', 'Metals', 'Information Technology Services', 'Apparel', 'Chemicals', 'Financial Data Services', 'Apparel', 'Transportation Equipment', 'Trucking, Truck Leasing', 'Toys, Sporting Goods', 'Insurance: Property and Casualty (Stock)', 'Utilities: Gas and Electric', 'Hotels, Casinos, Resorts', 'Entertainment', 'Energy', 'Food Consumer Products', 'Securities', 'Aerospace and Defense', 'Engineering, Construction', 'Health Care: Medical Facilities', 'Commercial Banks', 'Internet Services and Retailing', 'Automotive Retailing, Services', 'Industrial Machinery', 'Specialty Retailers: Apparel', 'Hotels, Casinos, Resorts', 'Food and Drug Stores', 'Computers, Office Equipment', 'Scientific,Photographic and Control Equipment', 'Food and Drug Stores', 'Network and Other Communications Equipment', 'Apparel', 'Health Care: Pharmacy and Other Services', 'Food Services', 'Scientific,Photographic and Control Equipment', 'Information Technology Services', 'Energy', 'Health Care: Medical Facilities', 'Wholesalers: Diversified', 'Insurance: Property and Casualty (Stock)', 'Utilities: Gas and Electric', 'Packaging, Containers', 'Wholesalers: Diversified', 'Trucking, Truck Leasing', 'Education', 'Wholesalers: Diversified', 'Homebuilders', 'Real estate', 'Construction and Farm Machinery', 'Real estate', 'Information Technology Services', 'Wholesalers: Diversified', 'Beverages', 'Real estate', 'Diversified Outsourcing Services', 'Scientific,Photographic and Control Equipment', 'Insurance: Life, Health (stock)', 'Automotive Retailing, Services', 'Securities', 'Mining, Crude-Oil Production', 'Food Services', 'Financial Data Services', 'Computer Software', 'Semiconductors and Other Electronic Components', 'Apparel', 'Engineering, Construction', 'Specialty Retailers: Other', 'Insurance: Life, Health (stock)', 'Financial Data Services', 'Publishing, Printing', 'Chemicals', 'Building Materials, Glass', 'Packaging, Containers', 'Packaging, Containers', 'Computer Software', 'Transportation and Logistics', 'Financial Data Services', 'Food and Drug Stores', 'Industrial Machinery', 'Medical Products and Equipment', 'Petroleum 
Refining', 'Financial Data Services', 'Health Care: Medical Facilities', 'Building Materials, Glass', 'Securities', 'Home Equipment, Furnishings', 'Building Materials, Glass', 'Specialty Retailers: Other', 'Food Consumer Products', 'Specialty Retailers: Other', 'Insurance: Life, Health (stock)', 'Building Materials, Glass', 'Homebuilders', 'Industrial Machinery', 'Energy', 'Diversified Outsourcing Services', 'Industrial Machinery', 'Electronics, Electrical Equip.', 'Specialty Retailers: Apparel', 'Household and Personal Products', 'Chemicals', 'Building Materials, Glass', 'Diversified Financials', 'Industrial Machinery', 'Electronics, Electrical Equip.', 'Food Production', 'Securities', 'Securities', 'Electronics, Electrical Equip.', 'Transportation Equipment', 'Food Production', 'Industrial Machinery', 'Mining, Crude-Oil Production', 'Semiconductors and Other Electronic Components', 'Trucking, Truck Leasing', 'Pipelines', 'Oil and Gas Equipment, Services', 'Securities', 'Packaging, Containers', 'Wholesalers: Diversified', 'Motor Vehicles and Parts', 'Specialty Retailers: Apparel', 'Publishing, Printing', 'Internet Services and Retailing', 'Energy', 'Chemicals', 'Waste Management', 'Entertainment', 'Real estate', 'Wholesalers: Electronics and Office Equipment', 'Utilities: Gas and Electric', 'Entertainment', 'Pharmaceuticals', 'Computers, Office Equipment', 'Commercial Banks', 'Insurance: Property and Casualty (Mutual)', 'Insurance: Life, Health (stock)', 'Aerospace and Defense', 'Aerospace and Defense', 'Metals', 'Forest and Paper Products', 'Electronics, Electrical Equip.', 'Specialty Retailers: Apparel', 'Semiconductors and Other Electronic Components', 'Food and Drug Stores', 'Utilities: Gas and Electric', 'Health Care: Pharmacy and Other Services', 'Wholesalers: Diversified', 'Chemicals', 'Chemicals', 'Medical Products and Equipment', 'Semiconductors and Other Electronic Components', 'Trucking, Truck Leasing', 'Insurance: Property and Casualty (Stock)', 'Insurance: Life, Health (stock)', 'Apparel', 'Chemicals', 'Specialty Retailers: Other', 'Utilities: Gas and Electric', 'Energy', 'Beverages', 'Wholesalers: Diversified', 'Metals', 'Financial Data Services', 'Electronics, Electrical Equip.', 'Trucking, Truck Leasing', 'Miscellaneous', 'Securities', 'Diversified Outsourcing Services', 'Diversified Outsourcing Services', 'Insurance: Property and Casualty (Mutual)', 'Food Production', 'Packaging, Containers', 'Information Technology Services', 'Internet Services and Retailing', 'Specialty Retailers: Apparel', 'Motor Vehicles and Parts', 'Diversified Outsourcing Services', 'Commercial Banks', 'Diversified Outsourcing Services', 'Information Technology Services', 'Chemicals', 'Homebuilders', 'Airlines', 'Building Materials, Glass', 'Mining, Crude-Oil Production', 'Scientific,Photographic and Control Equipment', 'Entertainment', 'Insurance: Life, Health (Mutual)', 'Diversified Outsourcing Services', 'Food Services', 'Hotels, Casinos, Resorts', 'Publishing, Printing', 'Motor Vehicles and Parts', 'Food Consumer Products', 'Medical Products and Equipment', 'Mining, Crude-Oil Production', 'Miscellaneous', 'Hotels, Casinos, Resorts', 'Chemicals', 'Utilities: Gas and Electric', 'Medical Products and Equipment', 'Diversified Financials', 'Semiconductors and Other Electronic Components', 'Home Equipment, Furnishings', 'Entertainment', 'Metals', 'Industrial Machinery', 'Electronics, Electrical Equip.', 'General Merchandisers', 'Securities', 'Beverages', 'Entertainment', 'Engineering, Construction', 
'Engineering, Construction', 'Waste Management', 'Energy', 'Chemicals', 'Food Services', 'Health Care: Insurance and Managed Care', 'Commercial Banks', 'Diversified Outsourcing Services', 'Specialty Retailers: Other', 'Specialty Retailers: Apparel', 'Insurance: Life, Health (Mutual)', 'Wholesalers: Diversified', 'Securities', 'Industrial Machinery', 'Securities', 'Computer Software', 'Health Care: Medical Facilities', 'Medical Products and Equipment', 'Internet Services and Retailing', 'Metals', 'Wholesalers: Diversified', 'Motor Vehicles and Parts', 'Food Consumer Products', 'Insurance: Life, Health (stock)', 'Real estate', 'Trucking, Truck Leasing', 'Information Technology Services', 'Homebuilders', 'Diversified Financials', 'Apparel', 'Entertainment', 'Pipelines', 'Network and Other Communications Equipment', 'Specialty Retailers: Apparel', 'Diversified Outsourcing Services', 'Real estate', 'Wholesalers: Diversified', 'Apparel', 'Food Services', 'Industrial Machinery', 'Specialty Retailers: Apparel', 'Home Equipment, Furnishings', 'Engineering, Construction', 'Scientific,Photographic and Control Equipment', 'Metals', 'Medical Products and Equipment', 'Information Technology Services', 'Commercial Banks', 'Entertainment', 'Building Materials, Glass', 'Scientific,Photographic and Control Equipment', 'Computer Software', 'Insurance: Property and Casualty (Stock)', 'Chemicals', 'Utilities: Gas and Electric', 'Specialty Retailers: Other', 'Airlines', 'Household and Personal Products', 'Health Care: Pharmacy and Other Services', 'Real estate', 'Semiconductors and Other Electronic Components', 'Utilities: Gas and Electric', 'Scientific,Photographic and Control Equipment', 'Wholesalers: Diversified', 'Airlines', 'Temporary Help', 'Industrial Machinery', 'Real estate', 'Mining, Crude-Oil Production', 'Aerospace and Defense', 'Semiconductors and Other Electronic Components', 'Real estate', 'Wholesalers: Diversified', 'Education', 'Insurance: Property and Casualty (Mutual)', 'Mining, Crude-Oil Production', 'Industrial Machinery', 'Railroads', 'Homebuilders', 'Utilities: Gas and Electric', 'Hotels, Casinos, Resorts', 'Utilities: Gas and Electric', 'Entertainment', 'Utilities: Gas and Electric', 'Miscellaneous', 'General Merchandisers', 'Computers, Office Equipment', 'Semiconductors and Other Electronic Components', 'Temporary Help', 'Pipelines', 'Construction and Farm Machinery', 'Internet Services and Retailing', 'Aerospace and Defense', 'Pharmaceuticals', 'Real estate', 'Insurance: Property and Casualty (Stock)', 'Packaging, Containers', 'Semiconductors and Other Electronic Components', 'Apparel', 'Chemicals', 'Forest and Paper Products', 'Real estate', 'Food and Drug Stores', 'Securities', 'Homebuilders', 'Information Technology Services', 'Internet Services and Retailing', 'Petroleum Refining', 'Health Care: Pharmacy and Other Services', 'Specialty Retailers: Other', 'Entertainment', 'Trucking, Truck Leasing', 'Computer Software', 'Electronics, Electrical Equip.', 'Hotels, Casinos, Resorts', 'Engineering, Construction', 'Industrial Machinery', 'Industrial Machinery', 'Specialty Retailers: Other', 'Specialty Retailers: Apparel', 'Electronics, Electrical Equip.', 'Specialty Retailers: Apparel', 'Oil and Gas Equipment, Services', 'Energy', 'Apparel', 'Semiconductors and Other Electronic Components', 'Real estate', 'Industrial Machinery', 'Chemicals', 'Mining, Crude-Oil Production', 'Real estate', 'Semiconductors and Other Electronic Components', 'Mining, Crude-Oil Production', 'Wholesalers: 
Diversified', 'Scientific,Photographic and Control Equipment', 'Chemicals', 'Securities', 'Scientific,Photographic and Control Equipment', 'Household and Personal Products', 'Semiconductors and Other Electronic Components', 'Insurance: Life, Health (Mutual)', 'Industrial Machinery', 'Financial Data Services', 'Specialty Retailers: Apparel', 'Household and Personal Products', 'Home Equipment, Furnishings', 'Insurance: Life, Health (stock)', 'Aerospace and Defense', 'Energy', 'Motor Vehicles and Parts', 'Industrial Machinery', 'Miscellaneous', 'Motor Vehicles and Parts', 'Utilities: Gas and Electric', 'Food Services', 'Scientific,Photographic and Control Equipment', 'Household and Personal Products', 'Financial Data Services', 'Financial Data Services', 'Diversified Financials', 'Internet Services and Retailing', 'Financial Data Services', 'Securities', 'Food Consumer Products', 'Mining, Crude-Oil Production', 'Computer Software', 'Food Services', 'Shipping', 'Financial Data Services', 'Railroads', 'Telecommunications', 'Chemicals', 'Specialty Retailers: Other', 'Wholesalers: Electronics and Office Equipment', 'Insurance: Property and Casualty (Mutual)', 'Home Equipment, Furnishings', 'Real estate', 'Transportation Equipment', 'Scientific,Photographic and Control Equipment', 'Real estate', 'Energy', 'Transportation and Logistics', 'Information Technology Services', 'Motor Vehicles and Parts', 'Medical Products and Equipment', 'Financial Data Services', 'Commercial Banks', 'Computer Software', 'Medical Products and Equipment', 'Specialty Retailers: Apparel', 'Semiconductors and Other Electronic Components', 'Trucking, Truck Leasing', 'Securities', 'Aerospace and Defense', 'Network and Other Communications Equipment', 'Chemicals', 'Trucking, Truck Leasing', 'Pipelines', 'Pharmaceuticals', 'Health Care: Medical Facilities', 'Tobacco', 'Industrial Machinery', 'Medical Products and Equipment', 'Motor Vehicles and Parts', 'Food and Drug Stores', 'Wholesalers: Diversified', 'Industrial Machinery', 'Computer Software', 'Building Materials, Glass', 'Household and Personal Products', 'Shipping', 'Network and Other Communications Equipment', 'Pipelines', 'Commercial Banks', 'Electronics, Electrical Equip.', 'Utilities: Gas and Electric', 'Mining, Crude-Oil Production', 'Aerospace and Defense', 'Aerospace and Defense', 'Health Care: Pharmacy and Other Services', 'Building Materials, Glass', 'Chemicals', 'Aerospace and Defense', 'Medical Products and Equipment', 'Publishing, Printing', 'Homebuilders', 'Chemicals', 'Insurance: Property and Casualty (Stock)', 'Hotels, Casinos, Resorts', 'Industrial Machinery', 'Home Equipment, Furnishings', 'Transportation and Logistics', 'Computer Software', 'Computer Software', 'Specialty Retailers: Apparel', 'Financial Data Services', 'Computer Software', 'Building Materials, Glass', 'Aerospace and Defense', 'Energy', 'Securities', 'Chemicals', 'Oil and Gas Equipment, Services', 'Mining, Crude-Oil Production', 'Industrial Machinery', 'Homebuilders', 'Semiconductors and Other Electronic Components', 'Hotels, Casinos, Resorts', 'Insurance: Life, Health (stock)', 'Engineering, Construction', 'Insurance: Property and Casualty (Stock)', 'Aerospace and Defense', 'Specialty Retailers: Other', 'Oil and Gas Equipment, Services', 'Financial Data Services', 'Specialty Retailers: Apparel', 'Entertainment', 'Health Care: Pharmacy and Other Services', 'Wholesalers: Diversified', 'Health Care: Pharmacy and Other Services', 'Financial Data Services', 'Health Care: Medical Facilities', 
'Real estate'], 'City': ['Bentonville', 'Irving', 'Omaha', 'Cupertino', 'Minnetonka', 'SF', 'Woonsocket', 'Seattle', 'Dallas', 'Detroit', 'Dearborn', 'Chesterbrook', 'San Ramon', 'Dublin', 'Issaquah', 'New York', 'Cincinnati', 'Boston', 'Deerfield', 'New York', 'Leavenworth', 'Mountain View', 'Atlanta', 'Charlotte', 'St. Louis', 'SF', 'Chicago', 'Houston', 'Indianapolis', 'Redmond', 'San Antonio', 'New York', 'Philadelphia', 'Armonk', 'Round Rock', 'Bloomington', 'New Brunswick', 'McLean', 'Minneapolis', 'Mooresville', 'Findlay', 'Cincinnati', 'New York', 'Atlanta', 'Harrison', 'Santa Clara', 'Midland', 'Chicago', 'Hartford', 'Memphis', 'Farmington', 'Newark', 'Boise', 'Houston', 'Burbank', 'Louisville', 'New York', 'Palo Alto', 'Bethesda', 'New York', 'St. Louis', 'San Jose', 'Nashville', 'Dallas', 'Deerfield', 'Columbus', 'New York', 'Boston', 'New York', 'New York', 'Fort Worth', 'Richfield', 'Bloomfield', 'Stamford', 'Atlanta', 'Menlo Park', 'Morris Plains', 'Kenilworth', 'Northbrook', 'Springdale', 'Chicago', 'Redwood City', 'Clearwater', 'New York', 'Framingham', 'New York', 'Atlanta', 'Lakeland', 'Beaverton', 'San Antonio', 'Miami', 'Chicago', 'Springfield', 'Camp Hill', 'Houston', 'Inver Grove Heights', 'St Paul', 'New York', 'Falls Church', 'San Antonio', 'McLean', 'Moline', 'New York', 'Milwaukee', 'Houston', 'New York', 'Palo Alto', 'New York', 'New York', 'North Chicago', 'Lake Bluff', 'Mayfield', 'Centennial', 'Pittsburgh', 'Houston', 'San Mateo', 'Deerfield', 'Falls Church', 'Waltham', 'Cincinnati', 'Rosemont', 'Minneapolis', 'Goodlettsville', 'Memphis', 'Charlotte', 'Atlanta', 'Bethesda', 'Phoenix', 'Indianapolis', 'Thousand Oaks', 'Oak Brook', 'Seattle', 'San Diego', 'Chesapeake', 'Parsippany-Troy Hills', 'New York', 'Columbus', 'Fort Lauderdale', 'Bloomfield Hills', 'Benton Harbor', 'Omaha', 'Dallas', 'Milwaukee', 'Waltham', 'New York', 'Houston', 'Dallas', 'Southfield', 'Columbus', 'Boise', 'Charlotte', 'Long Beach', 'Irving', 'Richmond', 'Bellevue', 'Hartford', 'Menomonee Falls', 'San Jose', 'St. Petersburg', 'Franklin', 'SF', 'Leavenworth', 'Irving', 'Los Angeles', 'Pittsburgh', 'Monroe', 'North Palm Beach', 'SF', 'Fremont', 'Tampa', 'Richmond', 'Hoffman Estates', 'Stamford', 'Richmond', 'New York City', 'Phoenix', 'Atlanta', 'St. Louis', 'Denver', 'Eden Prairie', 'SF', 'Minneapolis', 'Seattle', 'New York', 'Columbus', 'Greenwich', 'Akron', 'New York', 'Lincolnshire', 'Cleveland', 'Pittsburgh', 'Dallas', 'Eden Prairie', 'Atlanta', 'Teaneck', 'Hoboken', 'New York', 'Nashville', 'St. 
Louis', 'Philadelphia', 'Santa Clara', 'Houston', 'Englewood', 'Glenview', 'Wayne', 'Dallas', 'Los Angeles', 'Providence', 'Dublin', 'Des Moines', 'Arlington', 'New York', 'Oklahoma City', 'Arlington', 'St Paul', 'Arden Hills', 'New York', 'Houston', 'Akron', 'Houston', 'New York', 'San Jose', 'Tulsa', 'Summit', 'New York', 'Battle Creek', 'Las Vegas', 'New Britain', 'Norwalk', 'Miami', 'Columbus', 'Detroit', 'Richmond', 'Chesterfield', 'Plano', 'Harrison', 'New York', 'Melville', 'New York', 'Kalamazoo', 'New York', 'Greensboro', 'Roseland', 'Rosemead', 'Cambridge', 'Pittsburgh', 'South San Francisco', 'Union', 'Tulsa', 'Winston-Salem', 'Franklin Lakes', 'Minneapolis', 'Los Angeles', 'New York', 'New York', 'Cleveland', 'The Woodlands', 'New York', 'Boston', 'Palo Alto', 'Los Gatos', 'Pittsburgh', 'Riverwoods', 'Danbury', 'Jacksonville', 'Minneapolis', 'Chattanooga', 'King of Prussia', 'Princeton', 'Houston', 'San Diego', 'Wayne', 'Houston', 'New Orleans', 'Denver', 'New York', 'Broomfield', 'Memphis', 'El Dorado', 'Las Vegas', 'Boca Raton', 'The Woodlands', 'Deerfield', 'Norfolk', 'SF', 'Burlington', 'Lake Forest', 'Englewood', 'Auburn Hills', 'Beverly Hills', 'Norwalk', 'Reston', 'Corning', 'Medford', 'Bellevue', 'Phoenix', 'Dallas', 'Charlotte', 'Detroit', 'Chicago', 'Auburn Hills', 'Jacksonville', 'Atlanta', 'Durham', 'Los Angeles', 'Santa Clara', 'New York', 'Houston', 'San Jose', 'Kingsport', 'Madison', 'Fort Wayne', 'Newport Beach', 'Oklahoma City', 'Calhoun', 'Houston', 'Roanoke', 'Mechanicsville', 'Providence', 'Lake Forest', 'Chicago', 'Grapevine', 'Austin', 'McLean', 'Norwalk', 'Jacksonville', 'Newark', 'Marlboro', 'Springfield', 'SF', 'Waltham', 'New York', 'Parsippany-Troy Hills', 'Houston', 'Estero', 'Houston', 'Omaha', 'Philadelphia', 'Omaha', 'Coraopolis', 'Atlanta', 'Lisle', 'Minneapolis', 'Denver', 'Allentown', 'Atlanta', 'Duluth', 'Richmond', 'Downers Grove', 'New York', 'Byron Center', 'Houston', 'Tulsa', 'Fremont', 'Seattle', 'Chicago', 'Glenview', 'Camden', 'New York', 'Downers Grove', 'Warsaw', 'Dallas', 'New York', 'Springfield', 'Plano', 'Cincinnati', 'Secaucus', 'Norwalk', 'Greenwich', 'Pittsburgh', 'New York', 'Milwaukee', 'Livonia', 'Tysons', 'Lansing', 'Des Peres', 'Englewood', 'Erie', 'Hershey', 'Allentown', 'Newport News', 'Plymouth', 'Orrville', 'Brentwood', 'Greenwood Village', 'Victor', 'Miami', 'Houston', 'San Jose', 'Brentwood', 'Brentwood', 'Elkhart', 'Maumee', 'Seattle', 'Lowell', 'Orlando', 'Plano', 'New York', 'Evansville', 'Dallas', 'Santa Monica', 'Wilco inc', 'Wallingford', 'El Segundo', 'Wichita', 'Chicago', 'Melbourne', 'Seattle', 'Silver Spring', 'Perrysburg', 'San Jose', 'Cleveland', 'Cincinnati', 'Oshkosh', 'Cedar Rapids', 'Louisville', 'Tempe', 'Plano', 'Boston', 'Everett', 'New York', 'New York', 'Mahwah', 'Stamford', 'Ankeny', 'St. Louis', 'Glendale', 'Coral Gables', 'Jackson', 'Atlanta', 'St. Petersburg', 'Atlanta', 'Winston-Salem', 'Duluth', 'Providence', 'Lake Forest', 'New York', 'Houston', 'Little Rock', 'New York', 'San Mateo', 'Toledo', 'Chicago', 'Reston', 'Milwaukee', 'Oak Brook', 'Las Vegas', 'Clayton', 'Detroit', 'Chicago', 'Wilmington', 'San Antonio', 'St. 
Louis', 'Rolling Meadows', 'Irving', 'Charlotte', 'King of Prussia', 'Madison', 'Burlington', 'Birmingham', 'Beckett Ridge', 'St Paul', 'New York', 'Glen Allen', 'Westlake', 'Florham Park', 'Buffalo', 'Oakland', 'New York', 'New York', 'Bolingbrook', 'Louisville', 'Tarrytown', 'Little Rock', 'Scottsdale', 'Cincinnati', 'Atlanta', 'Westchester', 'Parsippany-Troy Hills', 'Horsham', 'Merriam', 'McLean', 'Santa Ana', 'Fairfield', 'Rye', 'Chicago', 'Brookfield', 'Milwaukee', 'Houston', 'St Paul', 'St. Louis', 'Phoenix', 'Indianapolis', 'Englewood', 'Sunnyvale', 'Medina', 'Irving', 'New York', 'Irving', 'Cincinnati', 'New York', 'Bethesda', 'Troy', 'Kennett Square', 'Irving', 'Santa Clara', 'Parsippany-Troy Hills', 'SF', 'Deerfield', 'Columbus', 'Menlo Park', 'St. Louis', 'Pawtucket', 'Worcester', 'Wilmington', 'Mountain View', 'Fort Mill', 'Houston', 'North Kansas City', 'Norwood', 'Chicago', 'Deerfield', 'Hartsville', 'Sunnyvale', 'Irving', 'Falls Church', 'Baltimore', 'Medina', 'Columbus', 'SF', 'Libertyville', 'Overland Park', 'El Segundo', 'Johnston', 'Merrillville', 'Las Vegas', 'Redwood City', 'Houston', 'Sparks Glencoe', 'Baltimore', 'Sterling', 'Los Angeles', 'Brentwood', 'Columbus', 'Boston', 'New Braunfels', 'Rye Brook', 'Dallas', 'Chicago', 'Phoenix', 'North Canton', 'Sarasota', 'Commerce', 'Newton', 'New York City', 'Flint', 'Denver', 'Santa Clara', 'Reston', 'Bismarck', 'Mechanicsburg', 'Boise', 'New York', 'Cayce', 'Atlanta', 'Winona', 'Green Bay', 'Baltimore', 'Herndon', 'Los Angeles', 'Redwood City', 'Westport', 'Houston', 'Arlington', 'Miami', 'Charlotte', 'Toledo', 'Boca Raton', 'Berwyn', 'Carmel', 'Lincolnshire', 'Boston', 'Houston', 'Tampa', 'New York', 'Mountain View', 'Tempe', 'Manhattan Beach', 'Houston', 'New York', 'McKinney', 'Lake Success', 'Sussex', 'Deerfield', 'Scottsdale', 'Stamford', 'Neenah', 'New York', 'Oak Brook', 'Cincinnati', 'Black Mountain', 'Kenosha', 'York', 'Indianapolis', 'Atlanta', 'Birmingham', 'Raleigh', 'New York', 'Carthage', 'Grand Rapids', 'Denton', 'Thomasville', 'New York', 'West Des Moines', 'Birmingham', 'Scottsdale', 'Wilmerding', 'Houston', 'Boston', 'Richardson', 'Highland Heights', 'Pittsburgh', 'Ewing Township', 'West Palm Beach', 'Charlotte', 'Evansville', 'Annapolis Junction', 'Lincolnshire', 'Maumee', 'Omaha', 'Leavenworth', 'Shelton', 'Dallas', 'Irving', 'Irving', 'Denver', 'Woburn', 'Jacksonville', 'Houston', 'Houston', 'Chicago', 'Delaware', 'The Woodlands', 'Novi', 'Philadelphia', 'Chicago', 'Southlake', 'Omaha', 'Columbus', 'Lake Forest', 'New York', 'Chicago', 'Greenville', 'Phoenix', 'Knoxville', 'New Haven', 'Stamford', 'New York', 'Bloomington', 'Madison', 'Berwyn', 'Cleveland', 'Pittsburgh', 'Catawba', 'Atlanta', 'New Albany', 'Milpitas', 'Sunbury', 'Bellevue', 'Fort Lauderdale', 'Carmel', 'Avon Lake', 'Philadelphia', 'Irvine', 'Chandler', 'Reno', 'Los Angeles', 'Galveston', 'Atlanta', 'New York', 'Atlanta', 'Madison', 'Pittsburgh', 'Corona', 'Atlanta', 'Chicago', 'Atlanta', 'Beloit', 'Thomasville', 'Voorhees Township', 'New York', 'Richmond', 'Richmond', 'Stevens Point', 'Laurel', 'Northbrook', 'Stamford', 'New York City', 'Houston', 'Rochester Hills', 'Houston', 'Dallas', 'San Leandro', 'Santa Clara', 'Covington', 'Scottsdale', 'St. 
George', 'Chicago', 'Spring', 'Santa Rosa', 'Knoxville', 'New York', 'Rochester', 'Dallas', 'Wyomissing', 'McLean', 'Van Buren Charter Township', 'Parsippany-Troy Hills', 'Sunnyvale', 'Oklahoma City', 'Houston', 'Las Vegas', 'Charlotte', 'Dallas', 'Marlboro', 'KCMO', 'Greensboro', 'Grand Rapids', 'New York', 'Columbus', 'North Canton', 'Milwaukee', 'San Diego', 'St. Louis', 'Louisville', 'Plano', 'Watsonville', 'Palm Beach Gardens', 'Norwell', 'Tempe', 'Marysville', 'Lebanon', 'San Juan', 'SF', 'Memphis', 'Merrimack', 'Nashville', 'Cleveland', 'Melville', 'Baltimore', 'Cleveland', 'New York', 'Fort Lauderdale', 'Franklin', 'Palo Alto', 'Chicago', 'Cleveland', 'Portsmouth', 'Findlay', 'Lake Success', 'Horsham', 'Los Angeles', 'Fort Smith', 'New York', 'Irvine', 'New York', 'New York', 'New York', 'Oklahoma City', 'Hanover', 'Columbus', 'Cincinnati', 'McLean', 'Covington', 'Richardson', 'Ann Arbor', 'Stamford', 'St. Louis', 'Lexington', 'Pasadena', 'San Diego', 'Omaha', 'Chicago', 'Blue Bell', 'Salt Lake City', 'Cockeysville', 'Nashville', 'Columbus', 'Mountain View', 'Chicago', 'Boston', 'KCMO', 'Plano', 'Honolulu', 'New York', 'Raleigh', 'Glendale', 'Costa Mesa', 'Evansville', 'Sunnyvale', 'Houston', 'Miramar', 'Calabasas', 'Cleveland', 'SF', 'Fort Worth', 'Thousand Oaks', 'Malvern', 'Boston', 'Cleveland', 'Arlington', 'Lincoln', 'Midland', 'White Plains', 'KCMO', 'Denver', 'Topeka', 'Las Vegas', 'Honolulu', 'McLean', 'Las Vegas', 'Farmington', 'York', 'San Jose', 'Neenah', 'Tacoma', 'Tulsa', 'Bloomington', 'Cambridge', 'Elma Center', 'Boston', 'Chicago', 'Branchville', 'Crystal Lake', 'Scottsdale', 'Portland', 'Fairlawn', 'Miamisburg', 'SF', 'Pittsburgh', 'New York', 'Red Bank', 'Reston', 'SF', 'Houston', 'Waltham', 'Corte Madera', 'Irving', 'Phoenix', 'Raleigh', 'St. Louis', 'Las Vegas', 'Dallas', 'Milwaukee', 'Minneapolis', 'Elmsford', 'New York', 'Reading', 'Los Angeles', 'Houston', 'Leavenworth', 'Rockford', 'San Jose', 'New York', 'Elgin', 'Waterford', 'Cleveland', 'Chicago', 'San Jose', 'St. 
Louis', 'Tucker', 'Milford', 'St Paul', 'West Palm Beach', 'Waltham', 'Chesterfield', 'San Jose', 'New Haven', 'Lake Forest', 'KCMO', 'Fort Myers', 'Provo', 'Zeeland', 'Montpelier', 'Charlotte', 'Wall Township', 'Milwaukee', 'Memphis', 'Boca Raton', 'Indianapolis', 'Oklahoma City', 'Calabasas', 'Raleigh', 'Orlando', 'Leawood', 'Norcross', 'Coppell', 'Scottsdale', 'Pleasanton', 'Chicago', 'Charlotte', 'El Dorado', 'Hoffman Estates', 'Louisville', 'Houston', 'SF', 'Darien', 'Boulder', 'Richmond', 'Commerce', 'El Segundo', 'Owatonna', 'Muscatine', 'Newton', 'Lake Oswego', 'Hercules', 'Arlington', 'Ames', 'Harrison', 'Dayton', 'Elkhart', 'Wayne', 'Jersey City', 'Hato Rey', 'Pleasanton', 'Pleasanton', 'Columbus', 'North Reading', 'Omaha', 'Los Angeles', 'Fort Collins', 'Seattle', 'Lexington', 'Downers Grove', 'Tulsa', 'Franklin Township', 'Brentwood', 'Richmond', 'Westlake', 'San Diego', 'Livonia', 'Memphis', 'Tustin', 'Pittsburgh', 'San Rafael', 'Cary', 'Walnut Creek', 'Honolulu', 'Englewood', 'Houston', 'Santa Clara', 'Liberty Lake', 'Portland', 'Los Angeles', 'Bellevue', 'McLean', 'San Diego', 'New York', 'Dallas', 'Stamford', 'Westbrook', 'Shoreview', 'Columbus', 'Houston', 'Houston', 'Orlando', 'Charlotte', 'Lake Zurich', 'Chicago', 'San Jose', 'Burlington', 'Indianapolis', 'Chicago', 'Santa Clara', 'Denver', 'Chantilly', 'Overland Park', 'Greenwich', 'Northfield', 'Houston', 'Denver', 'Milwaukee', 'Atlanta', 'Andover', 'Broomfield', 'Cincinnati', 'Daytona Beach', 'Daytona Beach', 'El Segundo', 'Bernards', 'Houston', 'San Jose', 'Secaucus', 'Chicago', 'Bensalem', 'Roswell', 'Wilmington', 'Irvine', 'Mission Viejo', 'Irvine'], 'State': ['AR', 'TX', 'NE', 'CA', 'MN', 'CA', 'RI', 'WA', 'TX', 'MI', 'MI', 'PA', 'CA', 'OH', 'WA', 'NY', 'OH', 'MA', 'IL', 'NY', 'WA', 'CA', 'GA', 'NC', 'MO', 'CA', 'IL', 'TX', 'IN', 'WA', 'TX', 'NY', 'PA', 'NY', 'TX', 'IL', 'NJ', 'VA', 'MN', 'NC', 'OH', 'OH', 'NY', 'GA', 'NY', 'CA', 'Michigan', 'IL', 'CT', 'TN', 'CT', 'NJ', 'ID', 'TX', 'CA', 'KY', 'NY', 'CA', 'MD', 'NY', 'MO', 'CA', 'TN', 'TX', 'IL', 'OH', 'NY', 'MA', 'NY', 'NY', 'TX', 'MN', 'CT', 'CT', 'GA', 'CA', 'NJ', 'NJ', 'IL', 'AR', 'IL', 'CA', 'FL', 'NY', 'MA', 'NY', 'GA', 'FL', 'OR', 'TX', 'FL', 'IL', 'MA', 'PA', 'TX', 'MN', 'MN', 'NY', 'VA', 'TX', 'VA', 'IL', 'NY', 'WI', 'TX', 'NY', 'CA', 'NY', 'NY', 'IL', 'IL', 'OH', 'CO', 'PA', 'TX', 'CA', 'IL', 'VA', 'MA', 'OH', 'IL', 'MN', 'TN', 'TN', 'NC', 'GA', 'MD', 'AZ', 'IN', 'CA', 'IL', 'WA', 'CA', 'VA', 'NJ', 'NY', 'GA', 'FL', 'MI', 'MI', 'NE', 'TX', 'WI', 'MA', 'NY', 'TX', 'TX', 'MI', 'IN', 'ID', 'NC', 'CA', 'TX', 'VA', 'WA', 'CT', 'WI', 'CA', 'FL', 'TN', 'CA', 'WA', 'TX', 'CA', 'PA', 'LA', 'FL', 'CA', 'CA', 'FL', 'VA', 'IL', 'CT', 'VA', 'NY', 'AZ', 'GA', 'MO', 'CO', 'MN', 'CA', 'MN', 'WA', 'NY', 'OH', 'CT', 'OH', 'NY', 'IL', 'OH', 'PA', 'TX', 'MN', 'GA', 'NJ', 'NJ', 'NY', 'TN', 'MO', 'PA', 'CA', 'TX', 'CO', 'IL', 'PA', 'TX', 'CA', 'RI', 'CA', 'IA', 'TX', 'NY', 'OK', 'VA', 'MN', 'MN', 'NY', 'TX', 'OH', 'TX', 'NY', 'CA', 'OK', 'NJ', 'NY', 'MI', 'NV', 'CT', 'CT', 'FL', 'OH', 'MI', 'VA', 'MO', 'TX', 'NY', 'NY', 'NY', 'NY', 'MI', 'NY', 'NC', 'NJ', 'CA', 'MA', 'PA', 'CA', 'NJ', 'OK', 'NC', 'NJ', 'MN', 'CA', 'NY', 'NY', 'OH', 'TX', 'NY', 'MA', 'CA', 'CA', 'PA', 'IL', 'CT', 'FL', 'MN', 'TN', 'PA', 'NJ', 'TX', 'CA', 'NJ', 'TX', 'LA', 'CO', 'NY', 'CO', 'TN', 'AR', 'NV', 'FL', 'TX', 'IL', 'VA', 'CA', 'NC', 'IL', 'CO', 'MI', 'CA', 'CT', 'VA', 'NY', 'OR', 'WA', 'AZ', 'TX', 'NC', 'MI', 'IL', 'MI', 'FL', 'GA', 'NC', 'CA', 'CA', 'NY', 'TX', 'CA', 'TN', 'WI', 'IN', 'CA', 'OK', 
'GA', 'TX', 'VA', 'VA', 'RI', 'IL', 'IL', 'TX', 'MN', 'VA', 'CT', 'FL', 'NJ', 'MA', 'MO', 'CA', 'MA', 'NY', 'NJ', 'TX', 'FL', 'TX', 'NE', 'PA', 'NE', 'PA', 'GA', 'IL', 'MN', 'CO', 'PA', 'GA', 'GA', 'VA', 'IL', 'NY', 'MI', 'TX', 'OK', 'CA', 'WA', 'IL', 'IL', 'NJ', 'NY', 'IL', 'IN', 'TX', 'NY', 'MA', 'TX', 'OH', 'NJ', 'CT', 'CT', 'PA', 'NY', 'WI', 'MI', 'VA', 'MI', 'MO', 'CO', 'PA', 'PA', 'PA', 'VA', 'MN', 'OH', 'TN', 'CO', 'NY', 'FL', 'TX', 'CA', 'TN', 'TN', 'IN', 'OH', 'WA', 'AR', 'FL', 'TX', 'NY', 'IN', 'TX', 'CA', 'NY', 'CT', 'CA', 'KS', 'IL', 'FL', 'WA', 'MD', 'OH', 'CA', 'OH', 'OH', 'WI', 'IA', 'KY', 'AZ', 'TX', 'MA', 'WA', 'NY', 'NY', 'NJ', 'CT', 'IA', 'MO', 'CA', 'FL', 'MI', 'GA', 'FL', 'GA', 'NC', 'GA', 'RI', 'IL', 'NY', 'TX', 'AR', 'NY', 'CA', 'OH', 'IL', 'VA', 'WI', 'IL', 'NV', 'MO', 'MI', 'IL', 'DE', 'TX', 'MO', 'IL', 'TX', 'NC', 'PA', 'NJ', 'NJ', 'AL', 'OH', 'MN', 'NY', 'VA', 'OH', 'NJ', 'NY', 'CA', 'NY', 'NY', 'IL', 'KY', 'NY', 'AR', 'AZ', 'OH', 'GA', 'IL', 'NJ', 'PA', 'KS', 'VA', 'CA', 'OH', 'NY', 'IL', 'WI', 'WI', 'TX', 'MN', 'MO', 'AZ', 'IN', 'CO', 'CA', 'MN', 'TX', 'NY', 'TX', 'OH', 'NY', 'MD', 'MI', 'PA', 'TX', 'CA', 'NJ', 'CA', 'IL', 'OH', 'CA', 'MO', 'RI', 'MA', 'DE', 'CA', 'SC', 'TX', 'MO', 'MA', 'IL', 'IL', 'SC', 'CA', 'TX', 'VA', 'MD', 'OH', 'GA', 'CA', 'IL', 'KS', 'CA', 'RI', 'IN', 'NV', 'CA', 'TX', 'MD', 'MD', 'VA', 'CA', 'TN', 'OH', 'MA', 'TX', 'NY', 'TX', 'IL', 'AZ', 'OH', 'FL', 'CA', 'NC', 'NY', 'MI', 'CO', 'CA', 'VA', 'ND', 'PA', 'ID', 'NY', 'SC', 'GA', 'MN', 'WI', 'MD', 'VA', 'CA', 'CA', 'CT', 'TX', 'VA', 'FL', 'NC', 'OH', 'FL', 'PA', 'IN', 'IL', 'MA', 'TX', 'FL', 'NY', 'CA', 'AZ', 'CA', 'TX', 'NY', 'TX', 'NY', 'WI', 'IL', 'AZ', 'CT', 'WI', 'NY', 'IL', 'OH', 'NC', 'WI', 'PA', 'IN', 'GA', 'AL', 'NC', 'NY', 'MO', 'MI', 'TX', 'GA', 'NY', 'IA', 'AL', 'AZ', 'PA', 'TX', 'MA', 'TX', 'KY', 'PA', 'NJ', 'FL', 'NC', 'IN', 'MD', 'IL', 'OH', 'NE', 'WA', 'CT', 'TX', 'TX', 'TX', 'CO', 'MA', 'FL', 'TX', 'TX', 'IL', 'OH', 'TX', 'MI', 'PA', 'IL', 'TX', 'NE', 'OH', 'IL', 'NY', 'IL', 'SC', 'AZ', 'TN', 'CT', 'CT', 'NY', 'IL', 'WI', 'PA', 'OH', 'PA', 'SC', 'GA', 'OH', 'CA', 'PA', 'WA', 'FL', 'IN', 'OH', 'PA', 'CA', 'AZ', 'NV', 'CA', 'TX', 'GA', 'NY', 'GA', 'WI', 'PA', 'CA', 'GA', 'IL', 'GA', 'WI', 'NC', 'NJ', 'NY', 'VA', 'VA', 'WI', 'MS', 'IL', 'CT', 'NY', 'TX', 'MI', 'TX', 'TX', 'CA', 'CA', 'KY', 'AZ', 'UT', 'IL', 'TX', 'CA', 'TN', 'NY', 'NY', 'TX', 'PA', 'VA', 'MI', 'NJ', 'CA', 'OK', 'TX', 'NV', 'NC', 'TX', 'MA', 'MO', 'NC', 'MI', 'NY', 'OH', 'OH', 'WI', 'CA', 'MO', 'KY', 'TX', 'CA', 'FL', 'MA', 'AZ', 'OH', 'TN', 'Puerto Rico', 'CA', 'TN', 'NH', 'TN', 'OH', 'NY', 'MD', 'OH', 'NY', 'FL', 'TN', 'CA', 'IL', 'OH', 'NH', 'OH', 'NY', 'PA', 'CA', 'AR', 'NY', 'CA', 'NY', 'NY', 'NY', 'OK', 'MD', 'OH', 'OH', 'VA', 'LA', 'TX', 'MI', 'CT', 'MO', 'KY', 'CA', 'CA', 'NE', 'IL', 'PA', 'UT', 'MD', 'TN', 'OH', 'CA', 'IL', 'MA', 'MO', 'TX', 'HI', 'NY', 'NC', 'CA', 'CA', 'IN', 'CA', 'TX', 'FL', 'CA', 'OH', 'CA', 'TX', 'CA', 'PA', 'MA', 'OH', 'VA', 'RI', 'TX', 'NY', 'MO', 'CO', 'KS', 'NV', 'HI', 'VA', 'NV', 'UT', 'PA', 'CA', 'WI', 'WA', 'OK', 'MN', 'MA', 'NY', 'MA', 'IL', 'NJ', 'IL', 'AZ', 'OR', 'OH', 'OH', 'CA', 'PA', 'NY', 'NJ', 'VA', 'CA', 'TX', 'MA', 'CA', 'TX', 'AZ', 'NC', 'MO', 'NV', 'TX', 'WI', 'MN', 'NY', 'NY', 'PA', 'CA', 'TX', 'WA', 'MI', 'CA', 'NY', 'IL', 'NY', 'OH', 'IL', 'CA', 'MO', 'GA', 'MA', 'MN', 'FL', 'MA', 'MO', 'CA', 'CT', 'IL', 'MO', 'FL', 'UT', 'MI', 'VT', 'NC', 'NJ', 'WI', 'TN', 'FL', 'IN', 'OK', 'CA', 'NC', 'FL', 'KS', 'GA', 'TX', 'AZ', 'CA', 'IL', 'NC', 'AR', 'IL', 'KY', 'TX', 
'CA', 'CT', 'CO', 'VA', 'CA', 'CA', 'MN', 'IA', 'MA', 'OR', 'CA', 'VA', 'IA', 'NY', 'OH', 'IN', 'PA', 'NJ', 'Puerto Rico', 'CA', 'CA', 'OH', 'MA', 'NE', 'CA', 'CO', 'WA', 'KY', 'IL', 'OK', 'NJ', 'TN', 'VA', 'OH', 'CA', 'MI', 'TN', 'CA', 'PA', 'CA', 'NC', 'CA', 'HI', 'CO', 'TX', 'CA', 'WA', 'OR', 'CA', 'WA', 'VA', 'CA', 'NY', 'TX', 'CT', 'ME', 'MN', 'OH', 'TX', 'TX', 'FL', 'NC', 'IL', 'IL', 'CA', 'MA', 'IN', 'IL', 'CA', 'CO', 'VA', 'KS', 'CT', 'IL', 'TX', 'CO', 'WI', 'GA', 'MA', 'CO', 'OH', 'FL', 'FL', 'CA', 'NJ', 'TX', 'CA', 'NJ', 'IL', 'PA', 'GA', 'MA', 'CA', 'CA', 'CA'], 'Latitude': [36.372853799999994, 32.814017699999994, 41.2565369, 37.322997799999996, 44.9211836, 37.7749295, 42.0028761, 47.6062095, 32.7766642, 42.331427000000005, 42.3222599, 40.0756627, 37.7799273, 40.0992294, 47.5301011, 40.7127753, 39.103118200000004, 42.360082500000004, 42.171136499999996, 40.7127753, 47.751074100000004, 37.3860517, 33.7489954, 35.2270869, 38.6270025, 37.7749295, 41.8781136, 29.7604267, 39.768403, 47.6739881, 29.4241219, 40.7127753, 39.9525839, 41.1264849, 30.508255100000003, 40.4842027, 40.4862157, 38.933867600000006, 44.977753, 35.5848596, 41.04422, 39.103118200000004, 40.7127753, 33.7489954, 41.0400135, 37.354107899999995, 43.623574, 41.8781136, 41.76580429999999, 35.1495343, 41.7360305, 40.735657, 43.6150186, 29.7604267, 34.18083920000001, 38.252664700000004, 40.7127753, 37.441883399999995, 38.984652000000004, 40.7127753, 38.6270025, 37.338208200000004, 36.1626638, 32.7766642, 42.171136499999996, 39.9611755, 40.7127753, 42.360082500000004, 40.7127753, 40.7127753, 32.7554883, 44.8832982, 41.826488, 41.0534302, 33.7489954, 37.4529598, 40.839592200000006, 40.6764911, 42.127526700000004, 36.18674420000001, 41.8781136, 37.4852152, 27.9658533, 40.7127753, 42.279286, 40.7127753, 33.7489954, 28.039465399999997, 45.4887993, 29.4241219, 25.7616798, 41.8781136, 42.1014831, 40.2398118, 29.7604267, 44.8480218, 44.953702899999996, 40.7127753, 38.882334, 29.4241219, 38.933867600000006, 41.5067003, 40.7127753, 43.0389025, 29.7604267, 40.7127753, 37.441883399999995, 40.7127753, 40.7127753, 42.325578, 42.304505, 41.55199520000001, 39.5807452, 40.440624799999995, 29.7604267, 37.558546500000006, 42.171136499999996, 38.882334, 42.376485200000005, 39.103118200000004, 41.9867507, 44.977753, 36.3231066, 35.1495343, 35.2270869, 33.7489954, 38.984652000000004, 33.4483771, 39.768403, 34.1705609, 41.8397865, 47.6062095, 32.715738, 36.7682088, 40.865286499999996, 40.7127753, 32.4609764, 26.122438600000002, 42.583645000000004, 42.1167065, 41.2565369, 32.7766642, 43.0389025, 42.376485200000005, 40.7127753, 29.7604267, 32.7766642, 42.473368799999996, 39.201440399999996, 43.6150186, 35.2270869, 33.770050399999995, 32.814017699999994, 37.540724600000004, 47.6101497, 41.76580429999999, 43.1788967, 37.338208200000004, 27.767600800000004, 35.9250637, 37.7749295, 47.751074100000004, 32.814017699999994, 34.0522342, 40.440624799999995, 32.5093109, 26.879781899999998, 37.7749295, 37.548269700000006, 27.950575, 37.540724600000004, 42.062991499999995, 41.0534302, 37.540724600000004, 40.7127753, 33.4483771, 33.7489954, 38.6270025, 39.739235799999996, 44.8546856, 37.7749295, 44.977753, 47.6062095, 40.7127753, 39.9611755, 41.0262417, 41.081444700000006, 40.7127753, 42.190024900000004, 41.499320000000004, 40.440624799999995, 32.7766642, 44.8546856, 33.7489954, 40.8932469, 40.7439905, 40.7127753, 36.1626638, 38.6270025, 39.9525839, 37.354107899999995, 29.7604267, 39.647765299999996, 42.069750899999995, 40.0462208, 32.7766642, 34.0522342, 
41.8239891, 37.7021521, 41.600544799999994, 32.735687, 40.7127753, 35.4675602, 38.8816208, 44.953702899999996, 45.0502435, 40.7127753, 29.7604267, 41.081444700000006, 29.7604267, 40.7127753, 37.338208200000004, 36.1539816, 40.714637599999996, 40.7127753, 42.32115220000001, 36.169941200000004, 41.6612104, 41.117744, 25.7616798, 39.9611755, 42.331427000000005, 37.540724600000004, 38.6631083, 33.0198431, 41.0400135, 40.7127753, 40.793432200000005, 40.7127753, 42.291706899999994, 40.7127753, 36.072635399999996, 40.8206555, 34.0805651, 42.373615799999996, 40.440624799999995, 37.654656, 40.697589799999996, 36.1539816, 36.0998596, 41.016763899999994, 44.977753, 34.165357, 40.7127753, 40.7127753, 41.499320000000004, 30.1658207, 40.7127753, 42.360082500000004, 37.441883399999995, 37.235807799999996, 40.440624799999995, 42.167525399999995, 41.394816999999996, 30.3321838, 44.977753, 35.0456297, 40.101285600000004, 40.3572976, 29.7604267, 32.715738, 40.925372499999995, 29.7604267, 29.9510658, 39.739235799999996, 40.7127753, 39.9205411, 35.1495343, 33.20763, 36.169941200000004, 26.3683064, 30.1658207, 42.171136499999996, 36.8507689, 37.7749295, 36.0956918, 42.2586342, 39.647765299999996, 42.6875323, 34.073620399999996, 41.117744, 38.9586307, 42.1428521, 42.3265152, 47.6101497, 33.4483771, 32.7766642, 35.2270869, 42.331427000000005, 41.8781136, 42.6875323, 30.3321838, 33.7489954, 35.980513, 34.0522342, 37.354107899999995, 40.7127753, 29.7604267, 37.338208200000004, 36.548434, 43.07305170000001, 41.079273, 33.618882899999996, 35.4675602, 34.502587, 29.7604267, 37.270970399999996, 37.6087561, 41.8239891, 42.2586342, 41.8781136, 32.9342919, 43.6666296, 38.933867600000006, 41.117744, 30.3321838, 40.735657, 42.3459271, 37.20895720000001, 37.7749295, 42.376485200000005, 40.7127753, 40.865286499999996, 29.7604267, 26.438136, 29.7604267, 41.2565369, 39.9525839, 41.2565369, 40.5184013, 33.7489954, 41.801140999999994, 44.977753, 39.739235799999996, 40.6022939, 33.7489954, 34.0028786, 37.540724600000004, 41.8089191, 40.7127753, 42.812250799999994, 29.7604267, 36.1539816, 37.548269700000006, 47.6062095, 41.8781136, 42.069750899999995, 39.9259463, 40.7127753, 41.8089191, 41.2381, 32.7766642, 40.7127753, 42.1014831, 33.0198431, 39.103118200000004, 40.7895453, 41.117744, 41.0262417, 40.440624799999995, 40.7127753, 43.0389025, 42.36837, 38.9187222, 42.732535, 38.59722, 39.647765299999996, 42.1292241, 40.2859239, 40.6022939, 37.087082099999996, 45.01051939999999, 40.843666299999995, 36.0331164, 39.6172101, 42.982563299999995, 25.7616798, 29.7604267, 37.338208200000004, 36.0331164, 36.0331164, 41.6819935, 41.5628294, 47.6062095, 36.25535429999999, 28.5383355, 33.0198431, 40.7127753, 37.9715592, 32.7766642, 34.01945429999999, 40.744679, 41.45701079999999, 33.9191799, 37.6871761, 41.8781136, 28.0836269, 47.6062095, 38.9906657, 41.556996000000005, 37.338208200000004, 41.499320000000004, 39.103118200000004, 44.024706200000004, 41.9778795, 38.252664700000004, 33.4255104, 33.0198431, 42.360082500000004, 47.9789848, 40.7127753, 40.7127753, 41.0886216, 41.0534302, 41.7317884, 38.6270025, 34.1425078, 25.72149, 42.245869, 33.7489954, 27.767600800000004, 33.7489954, 36.0998596, 34.0028786, 41.8239891, 42.2586342, 40.7127753, 29.7604267, 34.7464809, 40.7127753, 37.562991700000005, 41.6528052, 41.8781136, 38.9586307, 43.0389025, 41.8397865, 36.169941200000004, 38.6425518, 42.331427000000005, 41.8781136, 39.7390721, 29.4241219, 38.6270025, 42.0841936, 32.814017699999994, 35.2270869, 40.101285600000004, 40.7598227, 40.071222, 
33.5206608, 39.332126200000005, 44.953702899999996, 40.7127753, 37.665978, 41.4553232, 40.787878000000006, 42.8864468, 37.8043637, 40.7127753, 40.7127753, 41.6986416, 38.252664700000004, 41.076207700000005, 34.7464809, 33.494170399999994, 39.103118200000004, 33.7489954, 41.8498339, 40.865286499999996, 40.1784422, 39.023616499999996, 38.933867600000006, 33.7454725, 39.345467299999996, 40.980653499999995, 41.8781136, 43.0605671, 43.0389025, 29.7604267, 44.953702899999996, 38.6270025, 33.4483771, 39.768403, 39.647765299999996, 37.368829999999996, 45.0352411, 32.814017699999994, 40.7127753, 32.814017699999994, 39.103118200000004, 40.7127753, 38.984652000000004, 42.6064095, 39.84677670000001, 32.814017699999994, 37.354107899999995, 40.865286499999996, 37.7749295, 42.171136499999996, 39.9611755, 37.4529598, 38.6270025, 41.878710999999996, 42.262593200000005, 39.7390721, 37.3860517, 35.007369700000005, 29.7604267, 39.1429081, 42.1943909, 41.8781136, 42.171136499999996, 34.3740431, 37.368829999999996, 32.814017699999994, 38.882334, 39.2903848, 41.143245, 32.4609764, 37.7749295, 42.2333571, 38.9822282, 33.9191799, 41.820519899999994, 41.482814399999995, 36.169941200000004, 37.4852152, 29.7604267, 39.530938899999995, 39.2903848, 38.962489899999994, 34.3058279, 36.0331164, 39.9611755, 42.360082500000004, 29.7030024, 41.0192641, 32.7766642, 41.8781136, 33.4483771, 40.875890999999996, 27.3364347, 34.0005691, 35.7344538, 40.7127753, 43.012527399999996, 39.739235799999996, 37.354107899999995, 38.9586307, 46.808326799999996, 40.2142565, 43.6150186, 40.7127753, 33.9657091, 33.7489954, 44.0553908, 44.51331879999999, 39.2903848, 38.9695545, 34.0522342, 37.4852152, 41.141471700000004, 29.7604267, 38.8816208, 25.7616798, 35.2270869, 41.6528052, 26.3683064, 40.045823999999996, 39.978371, 42.190024900000004, 42.360082500000004, 29.7604267, 27.950575, 40.7127753, 37.3860517, 33.4255104, 33.884736100000005, 29.7604267, 40.7127753, 33.197246500000006, 40.7706572, 43.13418, 42.171136499999996, 33.494170399999994, 41.0534302, 44.1858193, 40.7127753, 41.8397865, 39.2807348, 35.6178951, 42.5847425, 39.9625984, 39.768403, 33.7489954, 33.5206608, 35.7795897, 40.7127753, 37.176446999999996, 42.9633599, 33.2148412, 30.8365815, 40.7127753, 41.5772115, 33.5206608, 33.494170399999994, 40.3909023, 29.7604267, 42.360082500000004, 32.948333500000004, 39.033116899999996, 40.440624799999995, 40.2599864, 26.7153424, 35.2270869, 37.9715592, 39.1202934, 42.190024900000004, 41.5628294, 41.2565369, 47.751074100000004, 41.3164856, 32.7766642, 32.814017699999994, 32.814017699999994, 39.739235799999996, 42.479261799999996, 30.3321838, 29.7604267, 29.7604267, 41.8781136, 40.2986724, 30.1658207, 42.48059, 39.9525839, 41.8781136, 32.9412363, 41.2565369, 39.9611755, 42.2586342, 40.7127753, 41.8781136, 34.8526176, 33.4483771, 35.96063839999999, 41.308274, 41.0534302, 40.7127753, 40.4842027, 43.07305170000001, 40.045823999999996, 41.499320000000004, 40.440624799999995, 34.85292329999999, 33.7489954, 40.0811745, 37.4323341, 40.862584999999996, 47.6101497, 26.1669711, 39.978371, 41.50531779999999, 39.9525839, 33.6845673, 33.3061605, 39.529632899999996, 34.0522342, 29.3013479, 33.7489954, 40.7127753, 33.7489954, 43.07305170000001, 40.440624799999995, 33.8752935, 33.7489954, 41.8781136, 33.7489954, 42.5083482, 35.8826369, 39.851944700000004, 40.7127753, 37.540724600000004, 37.540724600000004, 44.5235792, 31.694050899999997, 42.127526700000004, 41.0534302, 40.7127753, 29.7604267, 42.6583661, 30.0575359, 32.7766642, 37.7249296, 37.354107899999995, 
39.083671200000005, 33.494170399999994, 37.0965278, 41.8781136, 30.079940500000003, 38.440428999999995, 35.96063839999999, 40.7127753, 43.156577899999995, 32.7766642, 40.329537, 38.933867600000006, 42.2203171, 40.865286499999996, 37.368829999999996, 35.4675602, 29.7604267, 36.169941200000004, 35.2270869, 32.7766642, 42.3459271, 39.0997265, 36.072635399999996, 42.9633599, 40.7127753, 39.9611755, 40.875890999999996, 43.0389025, 32.715738, 38.6270025, 38.252664700000004, 33.0198431, 36.910231, 26.8233946, 42.1615157, 33.4255104, 40.2364486, 36.2081098, 18.465539399999997, 37.7749295, 35.1495343, 42.867869299999995, 36.1626638, 41.499320000000004, 40.793432200000005, 39.2903848, 41.499320000000004, 40.7127753, 26.122438600000002, 35.9250637, 37.441883399999995, 41.8781136, 41.499320000000004, 43.071755200000005, 41.04422, 40.7706572, 40.1784422, 34.0522342, 35.385924200000005, 40.7127753, 33.6845673, 40.7127753, 40.7127753, 40.7127753, 35.4675602, 39.1955042, 39.9611755, 39.103118200000004, 38.933867600000006, 30.4754702, 32.948333500000004, 42.2808256, 41.0534302, 38.6270025, 38.040583700000006, 34.1477849, 32.715738, 41.2565369, 41.8781136, 40.1523309, 40.760779299999996, 39.490001299999996, 36.1626638, 39.9611755, 37.3860517, 41.8781136, 42.360082500000004, 39.0997265, 33.0198431, 21.3069444, 40.7127753, 35.7795897, 34.1425078, 33.6412156, 37.9715592, 37.368829999999996, 29.7604267, 25.9860762, 34.1367208, 41.499320000000004, 37.7749295, 32.7554883, 34.1705609, 40.0362184, 42.360082500000004, 41.499320000000004, 38.8816208, 41.911012299999996, 31.9973456, 41.0339862, 39.0997265, 39.739235799999996, 39.0473451, 36.169941200000004, 21.3069444, 38.933867600000006, 36.169941200000004, 40.9804999, 39.9625984, 37.338208200000004, 44.1858193, 47.252876799999996, 36.1539816, 44.840798, 42.373615799999996, 42.82122879999999, 42.360082500000004, 41.8781136, 41.1464852, 42.2411344, 33.494170399999994, 45.5122308, 41.127833, 39.642836200000005, 37.7749295, 40.440624799999995, 40.7127753, 40.347054299999996, 38.9586307, 37.7749295, 29.7604267, 42.376485200000005, 37.92548060000001, 32.814017699999994, 33.4483771, 35.7795897, 38.6270025, 36.169941200000004, 32.7766642, 43.0389025, 44.977753, 41.055096899999995, 40.7127753, 40.335648299999995, 34.0522342, 29.7604267, 47.751074100000004, 43.1200272, 37.338208200000004, 40.7127753, 42.035408399999994, 42.7925777, 41.499320000000004, 41.8781136, 37.338208200000004, 38.6270025, 33.8545479, 42.13985770000001, 44.953702899999996, 26.7153424, 42.376485200000005, 38.6631083, 37.338208200000004, 41.308274, 42.2586342, 39.0997265, 26.640628000000003, 40.233843799999995, 42.8125246, 44.2600593, 35.2270869, 40.160666600000006, 43.0389025, 35.1495343, 26.3683064, 39.768403, 35.4675602, 34.1372953, 35.7795897, 28.5383355, 38.966673, 33.969864, 32.954568699999996, 33.494170399999994, 37.6624312, 41.8781136, 35.2270869, 33.20763, 42.062991499999995, 38.252664700000004, 29.7604267, 37.7749295, 41.0771914, 40.0149856, 37.540724600000004, 34.0005691, 33.9191799, 44.085557200000004, 41.424473, 42.337041299999996, 45.4156817, 38.017144099999996, 38.8816208, 42.03078120000001, 41.0400135, 39.758947799999994, 41.6819935, 40.041599600000005, 40.7281575, 18.4225782, 37.6624312, 37.6624312, 39.9611755, 42.5750939, 41.2565369, 34.0522342, 40.5852602, 47.6062095, 38.040583700000006, 41.8089191, 36.1539816, 40.497603999999995, 36.0331164, 37.540724600000004, 41.4553232, 32.715738, 42.36837, 35.1495343, 33.7420005, 40.440624799999995, 37.9735346, 35.791540000000005, 37.9100783, 
21.3069444, 39.647765299999996, 29.7604267, 37.354107899999995, 47.6743428, 45.5122308, 34.0522342, 47.6101497, 38.933867600000006, 32.715738, 40.7127753, 32.7766642, 41.0534302, 43.6770252, 45.0791325, 39.9611755, 29.7604267, 29.7604267, 28.5383355, 35.2270869, 42.1969689, 41.8781136, 37.338208200000004, 42.5047161, 39.768403, 41.8781136, 37.354107899999995, 39.739235799999996, 38.8942786, 38.9822282, 41.0262417, 42.09975, 29.7604267, 39.739235799999996, 43.0389025, 33.7489954, 42.6583356, 39.9205411, 39.103118200000004, 29.2108147, 29.2108147, 33.9191799, 40.7066174, 29.7604267, 37.338208200000004, 40.7895453, 41.8781136, 40.0994425, 34.0232431, 42.5481714, 33.6845673, 33.596891299999996, 33.6845673], 'Longitude': [-94.2088172, -96.9488945, -95.93450340000001, -122.03218229999999, -93.46874890000001, -122.4194155, -71.5147839, -122.33207079999998, -96.7969879, -83.0457538, -83.1763145, -75.4590816, -121.9780153, -83.11407709999999, -122.03261909999999, -74.0059728, -84.5120196, -71.0588801, -87.8445119, -74.0059728, -120.7401385, -122.08385109999999, -84.3879824, -80.8431267, -90.1994042, -122.4194155, -87.62979820000001, -95.36980279999999, -86.158068, -122.12151200000001, -98.4936282, -74.0059728, -75.1652215, -73.71401949999999, -97.678896, -88.9936873, -74.4518188, -77.17726040000001, -93.2650108, -80.81007240000001, -83.6499321, -84.5120196, -74.0059728, -84.3879824, -73.71444770000001, -121.9552356, -84.232105, -87.62979820000001, -72.6733723, -90.0489801, -72.795027, -74.1723667, -116.2023137, -95.36980279999999, -118.30896609999998, -85.7584557, -74.0059728, -122.14301950000001, -77.09470920000001, -74.0059728, -90.1994042, -121.88632859999998, -86.78160159999999, -96.7969879, -87.8445119, -82.9987942, -74.0059728, -71.0588801, -74.0059728, -74.0059728, -97.3307658, -93.28300209999999, -72.73009449999999, -73.53873409999999, -84.3879824, -122.1817252, -74.4818698, -74.2907032, -87.82895479999999, -94.1288141, -87.62979820000001, -122.2363548, -82.8001026, -74.0059728, -71.4161565, -74.0059728, -84.3879824, -81.9498042, -122.80133319999999, -98.4936282, -80.1917902, -87.62979820000001, -72.589811, -76.91997420000001, -95.36980279999999, -93.0427153, -93.0899578, -74.0059728, -77.17109140000001, -98.4936282, -77.17726040000001, -90.5151342, -74.0059728, -87.9064736, -95.36980279999999, -74.0059728, -122.14301950000001, -74.0059728, -74.0059728, -87.8411818, -87.89607120000001, -81.4392828, -104.87717260000001, -79.9958864, -95.36980279999999, -122.2710788, -87.8445119, -77.17109140000001, -71.2356113, -84.5120196, -87.87216020000001, -93.2650108, -86.7133302, -90.0489801, -80.8431267, -84.3879824, -77.09470920000001, -112.07403729999999, -86.158068, -118.83759369999999, -87.9535534, -122.33207079999998, -117.1610838, -76.2874927, -74.41738769999999, -74.0059728, -84.9877094, -80.13731740000001, -83.24548829999999, -86.4541894, -95.93450340000001, -96.7969879, -87.9064736, -71.2356113, -74.0059728, -95.36980279999999, -96.7969879, -83.2218731, -85.9213796, -116.2023137, -80.8431267, -118.1937395, -96.9488945, -77.4360481, -122.2015159, -72.6733723, -88.1173132, -121.88632859999998, -82.6402915, -86.86888990000001, -122.4194155, -120.7401385, -96.9488945, -118.24368490000002, -79.9958864, -92.1193012, -80.0533743, -122.4194155, -121.98857190000001, -82.4571776, -77.4360481, -88.1227199, -73.53873409999999, -77.4360481, -74.0059728, -112.07403729999999, -84.3879824, -90.1994042, -104.990251, -93.470786, -122.4194155, -93.2650108, -122.33207079999998, -74.0059728, -82.9987942, 
-73.62819640000001, -81.51900529999999, -74.0059728, -87.90840390000001, -81.6943605, -79.9958864, -96.7969879, -93.470786, -84.3879824, -74.0116536, -74.0323626, -74.0059728, -86.78160159999999, -90.1994042, -75.1652215, -121.9552356, -95.36980279999999, -104.98775970000001, -87.7878408, -75.3599105, -96.7969879, -118.24368490000002, -71.4128343, -121.9357918, -93.6091064, -97.10806559999999, -74.0059728, -97.5164276, -77.0909809, -93.0899578, -93.15661120000001, -74.0059728, -95.36980279999999, -81.51900529999999, -95.36980279999999, -74.0059728, -121.88632859999998, -95.992775, -74.3646122, -74.0059728, -85.17971419999999, -115.13982959999998, -72.77954190000001, -73.4081575, -80.1917902, -82.9987942, -83.0457538, -77.4360481, -90.5770675, -96.6988856, -73.71444770000001, -74.0059728, -73.4151214, -74.0059728, -85.5872286, -74.0059728, -79.79197540000001, -74.2937594, -118.072846, -71.10973349999999, -79.9958864, -122.40774979999999, -74.2631635, -95.992775, -80.24421600000001, -74.2057011, -93.2650108, -118.6089752, -74.0059728, -74.0059728, -81.6943605, -95.4612625, -74.0059728, -71.0588801, -122.14301950000001, -121.96237509999999, -79.9958864, -87.897014, -73.45401109999999, -81.655651, -93.2650108, -85.3096801, -75.38355250000001, -74.66722259999999, -95.36980279999999, -117.1610838, -74.2765441, -95.36980279999999, -90.0715323, -104.990251, -74.0059728, -105.0866504, -90.0489801, -92.66626740000001, -115.13982959999998, -80.1289321, -95.4612625, -87.8445119, -76.28587259999999, -122.4194155, -79.43779909999999, -87.840625, -104.98775970000001, -83.23410279999999, -118.40035630000001, -73.4081575, -77.35700279999999, -77.05469029999999, -122.8755949, -122.2015159, -112.07403729999999, -96.7969879, -80.8431267, -83.0457538, -87.62979820000001, -83.23410279999999, -81.655651, -84.3879824, -78.90511, -118.24368490000002, -121.9552356, -74.0059728, -95.36980279999999, -121.88632859999998, -82.5618186, -89.4012302, -85.13935129999999, -117.9298493, -97.5164276, -84.9510542, -95.36980279999999, -79.9414266, -77.37331390000001, -71.4128343, -87.840625, -87.62979820000001, -97.07806540000001, -92.9746367, -77.17726040000001, -73.4081575, -81.655651, -74.1723667, -71.55228740000001, -93.2922989, -122.4194155, -71.2356113, -74.0059728, -74.41738769999999, -95.36980279999999, -81.8067523, -95.36980279999999, -95.93450340000001, -75.1652215, -95.93450340000001, -80.1667247, -84.3879824, -88.0747875, -93.2650108, -104.990251, -75.4714098, -84.3879824, -84.1446376, -77.4360481, -88.01117459999999, -74.0059728, -85.7228061, -95.36980279999999, -95.992775, -121.98857190000001, -122.33207079999998, -87.62979820000001, -87.7878408, -75.1196199, -74.0059728, -88.01117459999999, -85.85304690000001, -96.7969879, -74.0059728, -72.589811, -96.6988856, -84.5120196, -74.05652979999999, -73.4081575, -73.62819640000001, -79.9958864, -74.0059728, -87.9064736, -83.3527097, -77.2310925, -84.55553470000001, -90.448126, -104.98775970000001, -80.085059, -76.6502468, -75.4714098, -76.4730122, -93.4555093, -81.7640212, -86.7827772, -104.95081409999999, -77.40887940000002, -80.1917902, -95.36980279999999, -121.88632859999998, -86.7827772, -86.7827772, -85.9766671, -83.6538244, -122.33207079999998, -94.1307587, -81.3792365, -96.6988856, -74.0059728, -87.5710898, -96.7969879, -118.4911912, -73.94854240000001, -72.82307359999999, -118.4164652, -97.330053, -87.62979820000001, -80.6081089, -122.33207079999998, -77.026088, -83.627157, -121.88632859999998, -81.6943605, -84.5120196, -88.5426136, -91.66562320000001, 
-85.7584557, -111.9400054, -96.6988856, -71.0588801, -122.2020795, -74.0059728, -74.0059728, -74.1435843, -73.53873409999999, -93.6001278, -90.1994042, -118.255075, -80.2683838, -84.4013462, -84.3879824, -82.6402915, -84.3879824, -80.24421600000001, -84.1446376, -71.4128343, -87.840625, -74.0059728, -95.36980279999999, -92.2895948, -74.0059728, -122.32552539999999, -83.53786740000001, -87.62979820000001, -77.35700279999999, -87.9064736, -87.9535534, -115.13982959999998, -90.32372629999999, -83.0457538, -87.62979820000001, -75.5397878, -98.4936282, -90.1994042, -88.0131275, -96.9488945, -80.8431267, -75.38355250000001, -74.417097, -74.8648873, -86.80249, -84.41726659999999, -93.0899578, -74.0059728, -77.5063739, -81.9179173, -74.38820720000001, -78.8783689, -122.27111370000002, -74.0059728, -74.0059728, -88.0683955, -85.7584557, -73.85874609999999, -92.2895948, -111.9260519, -84.5120196, -84.3879824, -87.8806738, -74.41738769999999, -75.1285061, -94.69357009999999, -77.17726040000001, -117.867653, -84.56031870000001, -73.6837399, -87.62979820000001, -88.1064787, -87.9064736, -95.36980279999999, -93.0899578, -90.1994042, -112.07403729999999, -86.158068, -104.98775970000001, -122.03634960000001, -93.5824586, -96.9488945, -74.0059728, -96.9488945, -84.5120196, -74.0059728, -77.09470920000001, -83.1497751, -75.7116032, -96.9488945, -121.9552356, -74.41738769999999, -122.4194155, -87.8445119, -82.9987942, -122.1817252, -90.1994042, -71.38255579999999, -71.8022934, -75.5397878, -122.08385109999999, -80.9450759, -95.36980279999999, -94.5729781, -71.19896949999999, -87.62979820000001, -87.8445119, -80.0734005, -122.03634960000001, -96.9488945, -77.17109140000001, -76.6121893, -81.8552196, -84.9877094, -122.4194155, -87.9259058, -94.6707917, -118.4164652, -71.512617, -87.3328139, -115.13982959999998, -122.2363548, -95.36980279999999, -76.6458043, -76.6121893, -77.4380485, -118.45719740000001, -86.7827772, -82.9987942, -71.0588801, -98.1244531, -73.68346209999999, -96.7969879, -87.62979820000001, -112.07403729999999, -81.40233559999999, -82.5306527, -118.1597929, -81.3444573, -74.0059728, -83.6874562, -104.990251, -121.9552356, -77.35700279999999, -100.7837392, -77.0085876, -116.2023137, -74.0059728, -81.0739827, -84.3879824, -91.6663523, -88.0132958, -76.6121893, -77.38609759999999, -118.24368490000002, -122.2363548, -73.35790490000001, -95.36980279999999, -77.0909809, -80.1917902, -80.8431267, -83.53786740000001, -80.1289321, -75.4395931, -86.1180435, -87.90840390000001, -71.0588801, -95.36980279999999, -82.4571776, -74.0059728, -122.08385109999999, -111.9400054, -118.41090890000001, -95.36980279999999, -74.0059728, -96.6397822, -73.7176312, -88.22294000000001, -87.8445119, -111.9260519, -73.53873409999999, -88.462609, -74.0059728, -87.9535534, -84.3173878, -82.3212302, -87.8211854, -76.727745, -86.158068, -84.3879824, -86.80249, -78.6381787, -74.0059728, -94.3102228, -85.6680863, -97.13306829999999, -83.9787808, -74.0059728, -93.711332, -86.80249, -111.9260519, -79.8100472, -95.36980279999999, -71.0588801, -96.72985190000001, -84.45188540000001, -79.9958864, -74.7909125, -80.0533746, -80.8431267, -87.5710898, -76.7769324, -87.90840390000001, -83.6538244, -95.93450340000001, -120.7401385, -73.0931641, -96.7969879, -96.9488945, -96.9488945, -104.990251, -71.1522765, -81.655651, -95.36980279999999, -95.36980279999999, -87.62979820000001, -83.067965, -95.4612625, -83.47549129999999, -75.1652215, -87.62979820000001, -97.13417829999999, -95.93450340000001, -82.9987942, -87.840625, -74.0059728, 
-87.62979820000001, -82.3940104, -112.07403729999999, -83.9207392, -72.9278835, -73.53873409999999, -74.0059728, -88.9936873, -89.4012302, -75.4395931, -81.6943605, -79.9958864, -80.9111862, -84.3879824, -82.8087864, -121.8995741, -76.7944104, -122.2015159, -80.25659499999999, -86.1180435, -82.02820009999999, -75.1652215, -117.82650490000002, -111.8412502, -119.8138027, -118.24368490000002, -94.7976958, -84.3879824, -74.0059728, -84.3879824, -89.4012302, -79.9958864, -117.56643840000001, -84.3879824, -87.62979820000001, -84.3879824, -89.0317765, -80.0819879, -74.961517, -74.0059728, -77.4360481, -77.4360481, -89.574563, -89.1306124, -87.82895479999999, -73.53873409999999, -74.0059728, -95.36980279999999, -83.14993220000001, -95.19029859999999, -96.7969879, -122.1560768, -121.9552356, -84.5085536, -111.9260519, -113.5684164, -87.62979820000001, -95.41716009999999, -122.7140548, -83.9207392, -74.0059728, -77.6088465, -96.7969879, -75.96521170000001, -77.17726040000001, -83.4838244, -74.41738769999999, -122.03634960000001, -97.5164276, -95.36980279999999, -115.13982959999998, -80.8431267, -96.7969879, -71.55228740000001, -94.5785667, -79.79197540000001, -85.6680863, -74.0059728, -82.9987942, -81.40233559999999, -87.9064736, -117.1610838, -90.1994042, -85.7584557, -96.6988856, -121.7568946, -80.1386547, -70.7927832, -111.9400054, -83.3671432, -86.29110240000001, -66.1057355, -122.4194155, -90.0489801, -71.4948322, -86.78160159999999, -81.6943605, -73.4151214, -76.6121893, -81.6943605, -74.0059728, -80.13731740000001, -86.86888990000001, -122.14301950000001, -87.62979820000001, -81.6943605, -70.7625532, -83.6499321, -73.7176312, -75.1285061, -118.24368490000002, -94.39854749999999, -74.0059728, -117.82650490000002, -74.0059728, -74.0059728, -74.0059728, -97.5164276, -76.72282270000001, -82.9987942, -84.5120196, -77.17726040000001, -90.1009108, -96.72985190000001, -83.7430378, -73.53873409999999, -90.1994042, -84.50371640000002, -118.1445155, -117.1610838, -95.93450340000001, -87.62979820000001, -75.266289, -111.89104740000002, -76.6585074, -86.78160159999999, -82.9987942, -122.08385109999999, -87.62979820000001, -71.0588801, -94.5785667, -96.6988856, -157.8583333, -74.0059728, -78.6381787, -118.255075, -117.91882209999999, -87.5710898, -122.03634960000001, -95.36980279999999, -80.3035602, -118.66148090000002, -81.6943605, -122.4194155, -97.3307658, -118.83759369999999, -75.5138118, -71.0588801, -81.6943605, -77.0909809, -71.4418101, -102.0779146, -73.76290970000001, -94.5785667, -104.990251, -95.67515759999999, -115.13982959999998, -157.8583333, -77.17726040000001, -115.13982959999998, -111.8874392, -76.727745, -121.88632859999998, -88.462609, -122.4442906, -95.992775, -93.29827990000001, -71.10973349999999, -78.63419959999999, -71.0588801, -87.62979820000001, -74.7523874, -88.3161965, -111.9260519, -122.6587185, -81.609844, -84.2866083, -122.4194155, -79.9958864, -74.0059728, -74.0643065, -77.35700279999999, -122.4194155, -95.36980279999999, -71.2356113, -122.5274755, -96.9488945, -112.07403729999999, -78.6381787, -90.1994042, -115.13982959999998, -96.7969879, -87.9064736, -93.2650108, -73.8201337, -74.0059728, -75.9268747, -118.24368490000002, -95.36980279999999, -120.7401385, -85.5600316, -121.88632859999998, -74.0059728, -88.2825668, -73.6812293, -81.6943605, -87.62979820000001, -121.88632859999998, -90.1994042, -84.21714240000001, -71.51630490000001, -93.0899578, -80.0533746, -71.2356113, -90.5770675, -121.88632859999998, -72.9278835, -87.840625, -94.5785667, -81.87230840000001, 
-111.6585337, -86.018651, -72.57538690000001, -80.8431267, -74.0679753, -87.9064736, -90.0489801, -80.1289321, -86.158068, -97.5164276, -118.6541895, -78.6381787, -81.3792365, -94.6169012, -84.2212938, -97.01500779999999, -111.9260519, -121.8746789, -87.62979820000001, -80.8431267, -92.66626740000001, -88.1227199, -85.7584557, -95.36980279999999, -122.4194155, -73.4686858, -105.2705456, -77.4360481, -118.1597929, -118.4164652, -93.2259349, -91.0432051, -71.2092214, -122.7159726, -122.28858079999999, -77.0909809, -93.63191309999999, -73.71444770000001, -84.1916069, -85.9766671, -75.3698895, -74.0776417, -66.0509549, -121.8746789, -121.8746789, -82.9987942, -71.0786653, -95.93450340000001, -118.24368490000002, -105.084423, -122.33207079999998, -84.50371640000002, -88.01117459999999, -95.992775, -74.4884868, -86.7827772, -77.4360481, -81.9179173, -117.1610838, -83.3527097, -90.0489801, -117.82363909999998, -79.9958864, -122.5310874, -78.78111690000001, -122.06518190000001, -157.8583333, -104.98775970000001, -95.36980279999999, -121.9552356, -117.1124241, -122.6587185, -118.24368490000002, -122.2015159, -77.17726040000001, -117.1610838, -74.0059728, -96.7969879, -73.53873409999999, -70.3711617, -93.1471667, -82.9987942, -95.36980279999999, -95.36980279999999, -81.3792365, -80.8431267, -88.0934108, -87.62979820000001, -121.88632859999998, -71.19562049999999, -86.158068, -87.62979820000001, -121.9552356, -104.990251, -77.4310992, -94.6707917, -73.62819640000001, -87.7808967, -95.36980279999999, -104.990251, -87.9064736, -84.3879824, -71.1367953, -105.0866504, -84.5120196, -81.02283309999999, -81.02283309999999, -118.4164652, -74.54932840000001, -95.36980279999999, -121.88632859999998, -74.05652979999999, -87.62979820000001, -74.9325683, -84.36155550000001, -71.17244670000001, -117.82650490000002, -117.6581562, -117.82650490000002]}
| true
| true
|
1c4628a354b0cddbcb048a1d50ce815aaa040404
| 277
|
py
|
Python
|
dev_global/dev_global/env.py
|
FrederichRiver/neutrino3
|
c16c6ea824999c012252d0e281473a6ab13fd38e
|
[
"BSD-3-Clause"
] | 1
|
2021-07-12T11:20:58.000Z
|
2021-07-12T11:20:58.000Z
|
dev_global/dev_global/env.py
|
FrederichRiver/neutrino3
|
c16c6ea824999c012252d0e281473a6ab13fd38e
|
[
"BSD-3-Clause"
] | null | null | null |
dev_global/dev_global/env.py
|
FrederichRiver/neutrino3
|
c16c6ea824999c012252d0e281473a6ab13fd38e
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python3
"""
global environment variables
"""
PYTHON_VERSION = 3.8
LOCAL_TIME_ZONE = 'Beijing'
PROG_NAME = 'Neutrino'
TIME_FMT = '%Y-%m-%d'
LOG_TIME_FMT = "%Y-%m-%d %H:%M:%S"
GITHUB_URL = "https://github.com/FrederichRiver/neutrino3"
EMAIL = "hezhiyuan_tju@163.com"
| 19.785714
| 58
| 0.696751
|
PYTHON_VERSION = 3.8
LOCAL_TIME_ZONE = 'Beijing'
PROG_NAME = 'Neutrino'
TIME_FMT = '%Y-%m-%d'
LOG_TIME_FMT = "%Y-%m-%d %H:%M:%S"
GITHUB_URL = "https://github.com/FrederichRiver/neutrino3"
EMAIL = "hezhiyuan_tju@163.com"
| true
| true
|
1c462bb178d3b38b6a5d6e1fcb701ca8021f18d6
| 4,671
|
py
|
Python
|
src/djangoSrc/app_api/settings.py
|
dighr/nethope_audio
|
8571bd6f621920f3fea085be3879cab15ccfc1e6
|
[
"MIT"
] | null | null | null |
src/djangoSrc/app_api/settings.py
|
dighr/nethope_audio
|
8571bd6f621920f3fea085be3879cab15ccfc1e6
|
[
"MIT"
] | 9
|
2021-03-09T21:01:14.000Z
|
2022-03-02T06:01:00.000Z
|
src/djangoSrc/app_api/settings.py
|
nethopeorg/nethope_audio
|
8571bd6f621920f3fea085be3879cab15ccfc1e6
|
[
"MIT"
] | null | null | null |
"""
Django settings for app_api project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6xxk%%z1ii*9%j(a-8p63(l&v$fb2de1w2fl24b(@rxzgcpk-8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['nethope-pr-assessment.appspot.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'audio_transcription',
'rest_framework',
'dropbox_listener'
]
CORS_ORIGIN_ALLOW_ALL = True
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated'
]
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), '..//', 'templates').replace('\\', '/')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app_api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = './static'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
if os.getenv('GAE_APPLICATION', None):
# Running on production App Engine, so connect to Google Cloud SQL using
# the unix socket at /cloudsql/<your-cloudsql-connection string>
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': '/cloudsql/nethope-pr-assessment:us-central1:nethopemysql',
'USER': os.environ["username"],
'PASSWORD': os.environ['password'],
'NAME': 'audio_transcription',
}
}
else:
# Running locally so connect to either a local MySQL instance or connect to
# Cloud SQL via the proxy. To start the proxy via command line:
#
# $ cloud_sql_proxy -instances=[INSTANCE_CONNECTION_NAME]=tcp:3306
#
# See https://cloud.google.com/sql/docs/mysql-connect-proxy
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': '127.0.0.1',
'PORT': '5505',
'USER': os.environ["username"],
'PASSWORD': os.environ['password'],
'NAME': 'audio_transcription',
}
}
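As the comments in the settings above note, the local branch of DATABASES expects a cloud_sql_proxy listening on 127.0.0.1:5505 and the database credentials in the `username` and `password` environment variables. Below is a minimal sketch of a local launcher that checks those assumptions before handing control to Django; only the variable names, the port, and the `app_api.settings` module path are taken from the file above, everything else is illustrative.

import os
import socket
import sys

REQUIRED_ENV = ("username", "password")   # credential variables read by the settings above
PROXY_ADDR = ("127.0.0.1", 5505)          # local cloud_sql_proxy endpoint from DATABASES

def check_local_db_setup():
    # Fail fast if the credentials or the SQL proxy are not in place.
    missing = [name for name in REQUIRED_ENV if not os.environ.get(name)]
    if missing:
        sys.exit("missing environment variables: %s" % ", ".join(missing))
    with socket.socket() as sock:
        sock.settimeout(2)
        if sock.connect_ex(PROXY_ADDR) != 0:
            sys.exit("cloud_sql_proxy does not appear to be listening on %s:%d" % PROXY_ADDR)

if __name__ == "__main__":
    check_local_db_setup()
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app_api.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)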
| 28.309091
| 98
| 0.673946
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '6xxk%%z1ii*9%j(a-8p63(l&v$fb2de1w2fl24b(@rxzgcpk-8'
DEBUG = True
ALLOWED_HOSTS = ['nethope-pr-assessment.appspot.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'audio_transcription',
'rest_framework',
'dropbox_listener'
]
CORS_ORIGIN_ALLOW_ALL = True
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated'
]
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), '..//', 'templates').replace('\\', '/')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app_api.wsgi.application'
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = './static'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
if os.getenv('GAE_APPLICATION', None):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': '/cloudsql/nethope-pr-assessment:us-central1:nethopemysql',
'USER': os.environ["username"],
'PASSWORD': os.environ['password'],
'NAME': 'audio_transcription',
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': '127.0.0.1',
'PORT': '5505',
'USER': os.environ["username"],
'PASSWORD': os.environ['password'],
'NAME': 'audio_transcription',
}
}
| true
| true
|
1c462c16ea8f0a10483f7cda6cfdbfbea0e74394
| 615
|
py
|
Python
|
themes/solarized-light.py
|
ruturajv/powerline-shell
|
2c30b504bc1da01e8f0a2bc2723ad5cc70662ec8
|
[
"MIT"
] | null | null | null |
themes/solarized-light.py
|
ruturajv/powerline-shell
|
2c30b504bc1da01e8f0a2bc2723ad5cc70662ec8
|
[
"MIT"
] | null | null | null |
themes/solarized-light.py
|
ruturajv/powerline-shell
|
2c30b504bc1da01e8f0a2bc2723ad5cc70662ec8
|
[
"MIT"
] | null | null | null |
class Color(DefaultColor):
USERNAME_FG = 15
USERNAME_BG = 4
USERNAME_ROOT_BG = 1
HOSTNAME_FG = 15
HOSTNAME_BG = 10
HOME_SPECIAL_DISPLAY = False
PATH_FG = 10
PATH_BG = 7
CWD_FG = 0
SEPARATOR_FG = 14
READONLY_BG = 1
READONLY_FG = 7
REPO_CLEAN_FG = 0
REPO_CLEAN_BG = 15
REPO_DIRTY_FG = 1
REPO_DIRTY_BG = 15
JOBS_FG = 4
JOBS_BG = 7
CMD_PASSED_FG = 15
CMD_PASSED_BG = 2
CMD_FAILED_FG = 15
CMD_FAILED_BG = 1
SVN_CHANGES_FG = REPO_DIRTY_FG
SVN_CHANGES_BG = REPO_DIRTY_BG
VIRTUAL_ENV_BG = 15
VIRTUAL_ENV_FG = 2
| 17.083333
| 34
| 0.64065
|
class Color(DefaultColor):
USERNAME_FG = 15
USERNAME_BG = 4
USERNAME_ROOT_BG = 1
HOSTNAME_FG = 15
HOSTNAME_BG = 10
HOME_SPECIAL_DISPLAY = False
PATH_FG = 10
PATH_BG = 7
CWD_FG = 0
SEPARATOR_FG = 14
READONLY_BG = 1
READONLY_FG = 7
REPO_CLEAN_FG = 0
REPO_CLEAN_BG = 15
REPO_DIRTY_FG = 1
REPO_DIRTY_BG = 15
JOBS_FG = 4
JOBS_BG = 7
CMD_PASSED_FG = 15
CMD_PASSED_BG = 2
CMD_FAILED_FG = 15
CMD_FAILED_BG = 1
SVN_CHANGES_FG = REPO_DIRTY_FG
SVN_CHANGES_BG = REPO_DIRTY_BG
VIRTUAL_ENV_BG = 15
VIRTUAL_ENV_FG = 2
| true
| true
|
1c462cdb35036a78451f4184c423dcc60ac9ac47
| 7,497
|
py
|
Python
|
poptorch/toolbox/Dataloader_h5.py
|
balewski/neuron_inverter_benchmark
|
4ad8a03c07e174728ccea2bc5f24d1ae620966a8
|
[
"MIT"
] | null | null | null |
poptorch/toolbox/Dataloader_h5.py
|
balewski/neuron_inverter_benchmark
|
4ad8a03c07e174728ccea2bc5f24d1ae620966a8
|
[
"MIT"
] | null | null | null |
poptorch/toolbox/Dataloader_h5.py
|
balewski/neuron_inverter_benchmark
|
4ad8a03c07e174728ccea2bc5f24d1ae620966a8
|
[
"MIT"
] | 1
|
2022-01-14T22:25:20.000Z
|
2022-01-14T22:25:20.000Z
|
__author__ = "Jan Balewski"
__email__ = "janstar1122@gmail.com"
'''
This data loader reads all data upon start; there is no distributed sampler.
It reads all data at once and serves it from RAM:
- optimized for multi-GPU training
- only uses a block of data from each H5 file
- reads data from a common file for all ranks
- allows for on-the-fly transformation
Shuffle: all samples are shuffled only after reading is completed
'''
import time, os
import random
import h5py
import numpy as np
from pprint import pprint
import copy
from torch.utils.data import Dataset, DataLoader
import torch
import logging
import poptorch
#...!...!..................
def get_data_loader(params, inpMD,domain,popopts, verb=1):
conf=copy.deepcopy(params) # the input is reused later in the upper level code
#print('\n\nGDL:',domain)
conf['domain']=domain
conf['h5name']=params['data_path']+inpMD['h5nameTemplate'].replace('*',params['cell_name'])
if params['num_inp_chan']!=None: #user wants a change
assert params['num_inp_chan']>0
assert params['num_inp_chan']<=inpMD['numFeature']
conf['numInpChan']=params['num_inp_chan']
else: # just copy the meta-data value
conf['numInpChan']=inpMD['numFeature']
conf['doAux']=False #legacy switch never used
#pprint(conf)
dataset= Dataset_h5_neuronInverter(conf,verb)
if 'max_samples_per_epoch' in params:
max_samp= params['max_samples_per_epoch']
print('GDL: WARN, shorter %s max_samples=%d from %d'%(domain,max_samp,dataset.numLocFrames))
dataset.numLocFrames=min(max_samp,dataset.numLocFrames)
#print('bb',len(dataset),dataset.sanity())
    # GC-specific constraint:
assert len(dataset)//conf['local_batch_size']//conf['gc_m2000']['replica_steps_per_iter']>0
params[domain+'_steps_per_epoch']=dataset.sanity()
params['model']['inputShape']=list(dataset.data_frames.shape[1:])
params['model']['outputSize']=dataset.data_parU.shape[1]
#shuffle=domain=='train' # use False only for reproducibility
shuffle=True # both: train & val
    # Graphcore specific
dataloader = poptorch.DataLoader(popopts,dataset,
batch_size=conf['local_batch_size'],
num_workers=conf['num_data_workers'],
shuffle=shuffle,
persistent_workers=True,
mode=poptorch.DataLoaderMode.Async,
async_options={
"sharing_strategy":
poptorch.SharingStrategy.SharedMemory,
"early_preload": True,
"buffer_size": conf['num_data_workers'],
"load_indefinitely": True,
"miss_sleep_time_in_ms": 0
},
auto_distributed_partitioning=False, #to serve all data
)
dataloader.conf=conf
#print('cc',len(dataloader))
return dataloader
#-------------------
#-------------------
#-------------------
class Dataset_h5_neuronInverter(Dataset):
def __init__(self, conf,verb=1):
self.conf=conf
self.verb=verb
self.openH5()
if self.verb and 0:
print('\nDS-cnst name=%s shuffle=%r BS=%d steps=%d myRank=%d numSampl/hd5=%d'%(self.conf['name'],self.conf['shuffle'],self.localBS,self.__len__(),self.conf['world_rank'],self.conf['numSamplesPerH5']),'H5-path=',self.conf['dataPath'])
assert self.numLocFrames>0
assert self.conf['world_rank']>=0
if self.verb :
logging.info(' DS:load-end %s locSamp=%d, X.shape: %s type: %s'%(self.conf['domain'],self.numLocFrames,str(self.data_frames.shape),self.data_frames.dtype))
#print(' DS:Xall',self.data_frames.shape,self.data_frames.dtype)
#print(' DS:Yall',self.data_parU.shape,self.data_parU.dtype)
#...!...!..................
def sanity(self):
stepPerEpoch=int(np.floor( self.numLocFrames/ self.conf['local_batch_size']))
if stepPerEpoch <1:
            print('\nDS:ABORT, Have you requested too few samples per rank?, numLocFrames=%d, BS=%d name=%s'%(self.numLocFrames, self.conf['local_batch_size'], self.conf['name']))
exit(67)
# all looks good
return stepPerEpoch
#...!...!..................
def openH5(self):
cf=self.conf
inpF=cf['h5name']
inpFeat=cf['numInpChan'] # this is what user wants
dom=cf['domain']
if self.verb>0 : logging.info('DS:fileH5 %s rank %d of %d '%(inpF,cf['world_rank'],cf['world_size']))
if not os.path.exists(inpF):
            print('FAILED, missing HD5',inpF)
exit(22)
startTm0 = time.time()
# = = = READING HD5 start
h5f = h5py.File(inpF, 'r')
Xshape=h5f[dom+'_frames'].shape
totSamp=Xshape[0]
locStep=int(totSamp/cf['world_size']/cf['local_batch_size'])
locSamp=locStep*cf['local_batch_size']
#print('totSamp=%d locStep=%d'%(totSamp,locStep))
assert locStep>0
maxShard= totSamp// locSamp
assert maxShard>=cf['world_size']
# chosen shard is rank dependent, wraps up if not sufficient number of ranks
myShard=self.conf['world_rank'] %maxShard
sampIdxOff=myShard*locSamp
if self.verb: logging.info('DS:file dom=%s myShard=%d, maxShard=%d, sampIdxOff=%d allXshape=%s inpFeat=%d'%(cf['domain'],myShard,maxShard,sampIdxOff,str(Xshape),inpFeat))
# data reading starts ....
assert inpFeat<=Xshape[2]
if inpFeat==Xshape[2]:
self.data_frames=h5f[dom+'_frames'][sampIdxOff:sampIdxOff+locSamp]#.astype('float32')
else:
self.data_frames=h5f[dom+'_frames'][sampIdxOff:sampIdxOff+locSamp,:,:inpFeat]
self.data_parU=h5f[dom+'_unitStar_par'][sampIdxOff:sampIdxOff+locSamp]#.astype('float32')
if cf['doAux']: #never used
self.data_parP=h5f[dom+'_phys_par'][sampIdxOff:sampIdxOff+locSamp]
h5f.close()
# = = = READING HD5 done
if self.verb>0 :
startTm1 = time.time()
if self.verb: logging.info('DS: hd5 read time=%.2f(sec) dom=%s '%(startTm1 - startTm0,dom))
# .......................................................
#.... data embeddings, transformation should go here ....
#self.data_parU*=1.2
#.... end of embeddings ........
# .......................................................
if 0: # check normalization
xm=np.mean(self.data_frames)
xs=np.std(self.data_frames)
print('xm',xm,xs,myShard,cf['domain'])
ok99
self.numLocFrames=self.data_frames.shape[0]
        #self.numLocFrames=512*10 # reduce number of samples
def __len__(self):
return self.numLocFrames
def __getitem__(self, idx):
# print('DSI:',idx,self.conf['name'],self.cnt); self.cnt+=1
assert idx>=0
assert idx< self.numLocFrames
X=self.data_frames[idx]
Y=self.data_parU[idx]
return (X,Y)
if self.conf['x_y_aux']: # predictions for Roy
AUX=self.data_parP[pCnt:pCnt+bs]
return (X,Y,AUX)
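A sketch of how the loader above might be driven in a single process; only the dictionary keys are taken from the code in this file, every value (paths, cell name, batch size, channel count) is a placeholder, and poptorch.Options() is assumed to be configured elsewhere for a real IPU run.

import poptorch
from toolbox.Dataloader_h5 import get_data_loader  # import path assumes the repo's poptorch/ layout

# Only the keys below are taken from the loader code; all values are placeholders.
params = {
    'data_path': '/path/to/h5/',
    'cell_name': 'someCell',                 # substituted into inpMD['h5nameTemplate']
    'num_inp_chan': None,                    # None -> use inpMD['numFeature']
    'local_batch_size': 16,
    'num_data_workers': 2,
    'world_rank': 0, 'world_size': 1,
    'gc_m2000': {'replica_steps_per_iter': 1},
    'model': {},                             # inputShape/outputSize are filled in by get_data_loader
}
inpMD = {'h5nameTemplate': '*.data.h5', 'numFeature': 4}   # placeholder meta-data

opts = poptorch.Options()                    # IPU session options, configured elsewhere
train_loader = get_data_loader(params, inpMD, 'train', opts, verb=1)
for X, Y in train_loader:
    print(X.shape, Y.shape)
    break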
| 38.25
| 246
| 0.577431
|
__author__ = "Jan Balewski"
__email__ = "janstar1122@gmail.com"
import time, os
import random
import h5py
import numpy as np
from pprint import pprint
import copy
from torch.utils.data import Dataset, DataLoader
import torch
import logging
import poptorch
def get_data_loader(params, inpMD,domain,popopts, verb=1):
conf=copy.deepcopy(params)
conf['domain']=domain
conf['h5name']=params['data_path']+inpMD['h5nameTemplate'].replace('*',params['cell_name'])
if params['num_inp_chan']!=None:
assert params['num_inp_chan']>0
assert params['num_inp_chan']<=inpMD['numFeature']
conf['numInpChan']=params['num_inp_chan']
else:
conf['numInpChan']=inpMD['numFeature']
conf['doAux']=False
dataset= Dataset_h5_neuronInverter(conf,verb)
if 'max_samples_per_epoch' in params:
max_samp= params['max_samples_per_epoch']
print('GDL: WARN, shorter %s max_samples=%d from %d'%(domain,max_samp,dataset.numLocFrames))
dataset.numLocFrames=min(max_samp,dataset.numLocFrames)
assert len(dataset)//conf['local_batch_size']//conf['gc_m2000']['replica_steps_per_iter']>0
params[domain+'_steps_per_epoch']=dataset.sanity()
params['model']['inputShape']=list(dataset.data_frames.shape[1:])
params['model']['outputSize']=dataset.data_parU.shape[1]
    shuffle=True
    dataloader = poptorch.DataLoader(popopts,dataset,
batch_size=conf['local_batch_size'],
num_workers=conf['num_data_workers'],
shuffle=shuffle,
persistent_workers=True,
mode=poptorch.DataLoaderMode.Async,
async_options={
"sharing_strategy":
poptorch.SharingStrategy.SharedMemory,
"early_preload": True,
"buffer_size": conf['num_data_workers'],
"load_indefinitely": True,
"miss_sleep_time_in_ms": 0
},
auto_distributed_partitioning=False,
)
dataloader.conf=conf
return dataloader
class Dataset_h5_neuronInverter(Dataset):
def __init__(self, conf,verb=1):
self.conf=conf
self.verb=verb
self.openH5()
if self.verb and 0:
print('\nDS-cnst name=%s shuffle=%r BS=%d steps=%d myRank=%d numSampl/hd5=%d'%(self.conf['name'],self.conf['shuffle'],self.localBS,self.__len__(),self.conf['world_rank'],self.conf['numSamplesPerH5']),'H5-path=',self.conf['dataPath'])
assert self.numLocFrames>0
assert self.conf['world_rank']>=0
if self.verb :
logging.info(' DS:load-end %s locSamp=%d, X.shape: %s type: %s'%(self.conf['domain'],self.numLocFrames,str(self.data_frames.shape),self.data_frames.dtype))
def sanity(self):
stepPerEpoch=int(np.floor( self.numLocFrames/ self.conf['local_batch_size']))
if stepPerEpoch <1:
            print('\nDS:ABORT, Have you requested too few samples per rank?, numLocFrames=%d, BS=%d name=%s'%(self.numLocFrames, self.conf['local_batch_size'], self.conf['name']))
exit(67)
return stepPerEpoch
def openH5(self):
cf=self.conf
inpF=cf['h5name']
inpFeat=cf['numInpChan']
dom=cf['domain']
if self.verb>0 : logging.info('DS:fileH5 %s rank %d of %d '%(inpF,cf['world_rank'],cf['world_size']))
if not os.path.exists(inpF):
            print('FAILED, missing HD5',inpF)
exit(22)
startTm0 = time.time()
h5f = h5py.File(inpF, 'r')
Xshape=h5f[dom+'_frames'].shape
totSamp=Xshape[0]
locStep=int(totSamp/cf['world_size']/cf['local_batch_size'])
locSamp=locStep*cf['local_batch_size']
assert locStep>0
maxShard= totSamp// locSamp
assert maxShard>=cf['world_size']
myShard=self.conf['world_rank'] %maxShard
sampIdxOff=myShard*locSamp
if self.verb: logging.info('DS:file dom=%s myShard=%d, maxShard=%d, sampIdxOff=%d allXshape=%s inpFeat=%d'%(cf['domain'],myShard,maxShard,sampIdxOff,str(Xshape),inpFeat))
assert inpFeat<=Xshape[2]
if inpFeat==Xshape[2]:
self.data_frames=h5f[dom+'_frames'][sampIdxOff:sampIdxOff+locSamp]
else:
self.data_frames=h5f[dom+'_frames'][sampIdxOff:sampIdxOff+locSamp,:,:inpFeat]
self.data_parU=h5f[dom+'_unitStar_par'][sampIdxOff:sampIdxOff+locSamp]
if cf['doAux']:
self.data_parP=h5f[dom+'_phys_par'][sampIdxOff:sampIdxOff+locSamp]
h5f.close()
if self.verb>0 :
startTm1 = time.time()
if self.verb: logging.info('DS: hd5 read time=%.2f(sec) dom=%s '%(startTm1 - startTm0,dom))
if 0:
xm=np.mean(self.data_frames)
xs=np.std(self.data_frames)
print('xm',xm,xs,myShard,cf['domain'])
ok99
self.numLocFrames=self.data_frames.shape[0]
    def __len__(self):
        return self.numLocFrames
def __getitem__(self, idx):
assert idx>=0
assert idx< self.numLocFrames
X=self.data_frames[idx]
Y=self.data_parU[idx]
return (X,Y)
if self.conf['x_y_aux']:
AUX=self.data_parP[pCnt:pCnt+bs]
return (X,Y,AUX)
| true
| true
|
1c462d72ef28053c69095bed607d4c067e869b96
| 3,358
|
py
|
Python
|
expression_evaluation.py
|
mengguoru/expression_evaluation
|
a2e4dd45611e4577c38b40de3a718ecd5f77c5ae
|
[
"MIT"
] | null | null | null |
expression_evaluation.py
|
mengguoru/expression_evaluation
|
a2e4dd45611e4577c38b40de3a718ecd5f77c5ae
|
[
"MIT"
] | null | null | null |
expression_evaluation.py
|
mengguoru/expression_evaluation
|
a2e4dd45611e4577c38b40de3a718ecd5f77c5ae
|
[
"MIT"
] | null | null | null |
'''
expression evaluation
author : mengguoru
date : 2016/03/27
'''
import re
class Expression:
def split(self,expr):
        '''split numbers and operators into an array, return the array (without whitespace)'''
temp = re.split(r"(\+|\-|\*|\/|\(|\))",re.sub(r"\s+",'',expr))
temp2 = []
for i in range(len(temp)):
if temp[i] != '':
temp2.append(temp[i])
return temp2
def infix_to_suffix(self,expr):
        '''Shunting Yard Algorithm'''
stack_out = []
stack_operator = []
for i in range(len(expr)):
if str(expr[i]) >= '0' and str(expr[i]) <= '9':
stack_out.append(expr[i])
else:
if(len(stack_operator) == 0):
stack_operator.append(expr[i])
else:
if str(expr[i]) == ')':
while len(stack_operator) > 0:
temp = stack_operator.pop()
if temp != '(':
stack_out.append(temp)
else:
break
elif expr[i] == '(':
stack_operator.append(expr[i])
else:
temp = stack_operator.pop()
while self.cmp_Precedence(expr[i],temp) == False:
stack_out.append(temp)
if len(stack_operator) > 0:
temp = stack_operator.pop()
else:
break
                        # if expr[i] precedence >= temp, temp should be pushed back
stack_operator.append(temp)
stack_operator.append(expr[i])
while len(stack_operator) > 0:
stack_out.append(stack_operator.pop())
return stack_out
def cmp_Precedence(self,op1,op2):
if(op1 == '*'or op1 == '/') and (op2 == '+'or op2 == '-'):
return True
elif(op1 == '*'or op1 == '/') and (op2 == '*'or op2=='/'):
return True
elif(op1=='+'or op1=='-')and(op2=='+'or op2=='-'):
return True
elif op2=='(':
return True
else:
return False
def evaluate_suffix(self,expr):
'''Reverse Polish Notation'''
stack = []
for i in range(len(expr)):
if str(expr[i]) >= '0' and str(expr[i]) <='9':
# print(stack)
stack.append(int(expr[i]))
else:
stack.append(self.calculate_2_param(expr[i],stack.pop(),stack.pop()))
return stack.pop()
def calculate_2_param(self,oper,num1,num2):
return {'+':num1+num2,'-':num2-num1,'*':num1*num2,'/':num1/num2}[oper]
def evaluate(self):
pass
if __name__ == '__main__':
    '''
    5 + ((1 + 2) * 4) − 3 converts to [5,1,2,'+',4,'*','+',3,'-']
    '''
    a = Expression()
    b = a.split("5 + ((1 + 2) * 4)-3")
    print(b) # output: ['5', '+', '(', '(', '1', '+', '2', ')', '*', '4', ')', '-', '3'], test passes
    # the corresponding postfix is 5 1 2 + 4 * + 3 −; evaluating it should give 14
    temp = ['5','1','2','+','4','*','+','3','-']
    print(a.evaluate_suffix(temp)) # output: 14, test passes
    # expected output: the postfix equivalent of 5 1 2 + 4 * + 3 −
print(a.infix_to_suffix(b))
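Taken together, split(), infix_to_suffix() and evaluate_suffix() form a small tokenize / convert / evaluate pipeline. A short sketch chaining them on the same expression; the evaluated result is 14, though the exact token ordering of the printed postfix depends on the precedence handling above.

# Chain tokenize -> infix-to-postfix -> postfix evaluation.
e = Expression()
tokens = e.split("5 + ((1 + 2) * 4) - 3")
postfix = e.infix_to_suffix(tokens)
print(postfix)                      # a postfix ordering of the expression
print(e.evaluate_suffix(postfix))   # 14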
| 38.159091
| 98
| 0.432102
|
import re
class Expression:
def split(self,expr):
temp = re.split(r"(\+|\-|\*|\/|\(|\))",re.sub(r"\s+",'',expr))
temp2 = []
for i in range(len(temp)):
if temp[i] != '':
temp2.append(temp[i])
return temp2
def infix_to_suffix(self,expr):
stack_out = []
stack_operator = []
for i in range(len(expr)):
if str(expr[i]) >= '0' and str(expr[i]) <= '9':
stack_out.append(expr[i])
else:
if(len(stack_operator) == 0):
stack_operator.append(expr[i])
else:
if str(expr[i]) == ')':
while len(stack_operator) > 0:
temp = stack_operator.pop()
if temp != '(':
stack_out.append(temp)
else:
break
elif expr[i] == '(':
stack_operator.append(expr[i])
else:
temp = stack_operator.pop()
while self.cmp_Precedence(expr[i],temp) == False:
stack_out.append(temp)
if len(stack_operator) > 0:
temp = stack_operator.pop()
else:
break
stack_operator.append(temp)
stack_operator.append(expr[i])
while len(stack_operator) > 0:
stack_out.append(stack_operator.pop())
return stack_out
def cmp_Precedence(self,op1,op2):
if(op1 == '*'or op1 == '/') and (op2 == '+'or op2 == '-'):
return True
elif(op1 == '*'or op1 == '/') and (op2 == '*'or op2=='/'):
return True
elif(op1=='+'or op1=='-')and(op2=='+'or op2=='-'):
return True
elif op2=='(':
return True
else:
return False
def evaluate_suffix(self,expr):
stack = []
for i in range(len(expr)):
if str(expr[i]) >= '0' and str(expr[i]) <='9':
stack.append(int(expr[i]))
else:
stack.append(self.calculate_2_param(expr[i],stack.pop(),stack.pop()))
return stack.pop()
def calculate_2_param(self,oper,num1,num2):
return {'+':num1+num2,'-':num2-num1,'*':num1*num2,'/':num1/num2}[oper]
def evaluate(self):
pass
if __name__ == '__main__':
a = Expression()
b = a.split("5 + ((1 + 2) * 4)-3")
print(b)
temp = ['5','1','2','+','4','*','+','3','-']
print(a.evaluate_suffix(temp))
print(a.infix_to_suffix(b))
| true
| true
|
1c4630086ef30c6136a9edabe95d3911ecb465d4
| 13,194
|
py
|
Python
|
lambda_function.py
|
rubrikinc/aws-native-secrets-rotation
|
c1488cc1b6fc2b89d32c83bd220678ee3bebfdbd
|
[
"MIT"
] | 1
|
2019-12-20T13:35:34.000Z
|
2019-12-20T13:35:34.000Z
|
lambda_function.py
|
rubrikinc/aws-native-secrets-rotation
|
c1488cc1b6fc2b89d32c83bd220678ee3bebfdbd
|
[
"MIT"
] | null | null | null |
lambda_function.py
|
rubrikinc/aws-native-secrets-rotation
|
c1488cc1b6fc2b89d32c83bd220678ee3bebfdbd
|
[
"MIT"
] | 2
|
2019-04-01T22:18:58.000Z
|
2020-03-13T15:08:26.000Z
|
#!/usr/local/bin/python3
import boto3
import logging
import os
import ast
import json
import rubrik_cdm
from copy import deepcopy
import urllib3
urllib3.disable_warnings()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
"""Secrets Manager Rotation Template
This is a template for creating an AWS Secrets Manager rotation lambda
Args:
event (dict): Lambda dictionary of event parameters. These keys must include the following:
- SecretId: The secret ARN or identifier
- ClientRequestToken: The ClientRequestToken of the secret version
- Step: The rotation step (one of createSecret, setSecret, testSecret, or finishSecret)
context (LambdaContext): The Lambda runtime information
Raises:
ResourceNotFoundException: If the secret with the specified arn and stage does not exist
ValueError: If the secret is not properly configured for rotation
KeyError: If the event parameters do not contain the expected keys
"""
arn = event['SecretId']
token = event['ClientRequestToken']
step = event['Step']
# Setup the local secret manager client
secret_service_client = boto3.client('secretsmanager')
# Make sure the version is staged correctly
metadata = secret_service_client.describe_secret(SecretId=arn)
if not metadata['RotationEnabled']:
logger.error("Secret %s is not enabled for rotation" % arn)
raise ValueError("Secret %s is not enabled for rotation" % arn)
versions = metadata['VersionIdsToStages']
if token not in versions:
logger.error("Secret version %s has no stage for rotation of secret %s." % (token, arn))
raise ValueError("Secret version %s has no stage for rotation of secret %s." % (token, arn))
if "AWSCURRENT" in versions[token]:
logger.info("Secret version %s already set as AWSCURRENT for secret %s." % (token, arn))
return
elif "AWSPENDING" not in versions[token]:
logger.error("Secret version %s not set as AWSPENDING for rotation of secret %s." % (token, arn))
raise ValueError("Secret version %s not set as AWSPENDING for rotation of secret %s." % (token, arn))
# retrieve current secret
current_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSCURRENT")['SecretString'])
# if the secret is for the account this function is executing in, use this function's role to talk to IAM
if current_secret['accountid'] == context.invoked_function_arn.split(":")[4]:
iam_service_client = boto3.client('iam')
# otherwise, attempt to assume a role into the target account
else:
iam_service_client = assume_role(role_arn=current_secret['rolearn'], session_name=current_secret['accountid']+'_session').client('iam')
if step == "createSecret":
create_secret(secret_service_client, arn, token, iam_service_client, current_secret)
elif step == "setSecret":
set_secret(secret_service_client, arn, token)
elif step == "testSecret":
test_secret(secret_service_client, arn, token)
elif step == "finishSecret":
finish_secret(secret_service_client, arn, token, iam_service_client)
else:
raise ValueError("Invalid step parameter")
def assume_role(role_arn=None, session_name='my_session'):
"""
If role_arn is given assumes a role and returns boto3 session
otherwise return a regular session with the current IAM user/role
"""
if role_arn:
client = boto3.client('sts')
response = client.assume_role(RoleArn=role_arn, RoleSessionName=session_name)
session = boto3.Session(
aws_access_key_id=response['Credentials']['AccessKeyId'],
aws_secret_access_key=response['Credentials']['SecretAccessKey'],
aws_session_token=response['Credentials']['SessionToken'])
return session
else:
return boto3.Session()
def create_secret(secret_service_client, arn, token, iam_service_client, current_secret):
"""Create the secret
This method first checks for the existence of a secret for the passed in token. If one does not exist, it will generate a
new secret and put it with the passed in token.
Args:
secret_service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version
Raises:
ResourceNotFoundException: If the secret with the specified arn and stage does not exist
"""
# Make sure the current secret exists
secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSCURRENT")
# Now try to get the secret version, if that fails, put a new secret
try:
secret_service_client.get_secret_value(SecretId=arn, VersionId=token, VersionStage="AWSPENDING")
logger.info("createSecret: Successfully retrieved secret for %s." % arn)
except secret_service_client.exceptions.ResourceNotFoundException:
# Generate new IAM credentials for this secret, fail if too many keys already exist
if len(iam_service_client.list_access_keys(UserName=current_secret['iamuser'])['AccessKeyMetadata']) > 1:
logger.error("User %s has more than one access key definied, cannot rotate" % current_secret['iamuser'])
raise ValueError("User %s has more than one access key definied, cannot rotate" % current_secret['iamuser'])
else:
new_access_keys = iam_service_client.create_access_key(UserName=current_secret['iamuser'])
# Create new secret string
new_secret = deepcopy(current_secret)
new_secret['iamaccesskey'] = new_access_keys['AccessKey']['AccessKeyId']
new_secret['iamsecretkey'] = new_access_keys['AccessKey']['SecretAccessKey']
new_secret_json = json.dumps(new_secret)
# Put the secret
secret_service_client.put_secret_value(SecretId=arn, ClientRequestToken=token, SecretString=new_secret_json, VersionStages=['AWSPENDING'])
logger.info("createSecret: Successfully put secret for ARN %s and version %s." % (arn, token))
def set_secret(secret_service_client, arn, token):
"""Set the secret
This method should set the AWSPENDING secret in the service that the secret belongs to. For example, if the secret is a database
credential, this method should take the value of the AWSPENDING secret and set the user's password to this value in the database.
Args:
secret_service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version
"""
# Retrieve secrets
current_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSCURRENT")['SecretString'])
pending_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSPENDING")['SecretString'])
rubrik_credentials = ast.literal_eval(secret_service_client.get_secret_value(SecretId='/rubrik/rubrik_cdm_credentials', VersionStage="AWSCURRENT")['SecretString'])
# connect to rubrik api
rubrik = rubrik_cdm.Connect(rubrik_credentials['rubrikhost'], rubrik_credentials['rubrikuser'], rubrik_credentials['rubrikpassword'])
# find cloud native source, generate config for update operation
cloud_sources = rubrik.get('internal', '/aws/account', timeout=15, authentication=True)['data']
    logger.info('attempting to get current cloud source detail from rubrik...')
    source_update_detail = None  # set once the matching cloud source is found
for source in cloud_sources:
source_detail = rubrik.get('internal', '/aws/account/'+source['id'], timeout=15, authentication=True)
logger.info('got cloud source detail for %s' % source['id'])
logger.info(source_detail)
logger.info('checking if source detail access key %s matches current access key %s' % (source_detail['accessKey'], current_secret['iamaccesskey']))
if source_detail['accessKey'] == current_secret['iamaccesskey']:
logger.info('found match!')
source_update_detail = deepcopy(source_detail)
source_update_detail['secretKey'] = pending_secret['iamsecretkey']
source_update_detail['accessKey'] = pending_secret['iamaccesskey']
details_to_remove = ('configuredSlaDomainName', 'primaryClusterId', 'id', 'configuredSlaDomainId')
for key in details_to_remove:
source_update_detail.pop(key, None)
else:
logger.info('no match found')
# if we found a matching Cloud Source, rotate the access key
if source_update_detail:
rubrik.update_aws_native_account(source_update_detail['name'], source_update_detail, timeout=30)
else:
logger.error("Could not find Cloud Native Source on Rubrik %s with access key %s" % (rubrik_credentials['rubrikhost'], current_secret['iamaccesskey']))
raise ValueError("Could not find Cloud Native Source on Rubrik %s with access key %s" % (rubrik_credentials['rubrikhost'], current_secret['iamaccesskey']))
def test_secret(secret_service_client, arn, token):
"""Test the secret
This method should validate that the AWSPENDING secret works in the service that the secret belongs to. For example, if the secret
is a database credential, this method should validate that the user can login with the password in AWSPENDING and that the user has
all of the expected permissions against the database.
Args:
secret_service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version
"""
# retrieve pending secret
pending_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSPENDING")['SecretString'])
# connect to rubrik api
rubrik_credentials = ast.literal_eval(secret_service_client.get_secret_value(SecretId='/rubrik/rubrik_cdm_credentials', VersionStage="AWSCURRENT")['SecretString'])
rubrik = rubrik_cdm.Connect(rubrik_credentials['rubrikhost'], rubrik_credentials['rubrikuser'], rubrik_credentials['rubrikpassword'])
# find relevant cloud source
cloud_sources = rubrik.get('internal', '/aws/account', timeout=60, authentication=True)['data']
for source in cloud_sources:
source_detail = rubrik.get('internal', '/aws/account/'+source['id'], timeout=60, authentication=True)
if source_detail['accessKey'] == pending_secret['iamaccesskey']:
source_id = source_detail['id']
# check if the cloud source can iterate subnets in us-east-1
try:
rubrik.get('internal', '/aws/account/%s/subnet?region=us-east-1' % (source_id), timeout=60, authentication=True)
except:
logger.error("Error iterating subnets in us-east-1 for Cloud Source %s" % source_id)
raise ValueError("Error iterating subnets in us-east-1 for Cloud Source %s" % source_id)
logger.info("testSecret: Successfully tested %s with new access keys" % source_id)
def finish_secret(secret_service_client, arn, token, iam_service_client):
"""Finish the secret
This method finalizes the rotation process by marking the secret version passed in as the AWSCURRENT secret.
Args:
secret_service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version
Raises:
ResourceNotFoundException: If the secret with the specified arn does not exist
"""
    # Get info about the deprecated access key for deletion
    deprecated_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSCURRENT")['SecretString'])
# First describe the secret to get the current version
metadata = secret_service_client.describe_secret(SecretId=arn)
current_version = None
for version in metadata["VersionIdsToStages"]:
if "AWSCURRENT" in metadata["VersionIdsToStages"][version]:
if version == token:
# The correct version is already marked as current, return
logger.info("finishSecret: Version %s already marked as AWSCURRENT for %s" % (version, arn))
return
current_version = version
break
# Finalize by staging the secret version current
secret_service_client.update_secret_version_stage(SecretId=arn, VersionStage="AWSCURRENT", MoveToVersionId=token, RemoveFromVersionId=current_version)
logger.info("finishSecret: Successfully set AWSCURRENT stage to version %s for secret %s." % (version, arn))
# Delete the depricated access key
iam_service_client.delete_access_key(UserName=depricated_secret['iamuser'], AccessKeyId=depricated_secret['iamaccesskey'])
logger.info("Deleted depricated access key %s" % depricated_secret['iamaccesskey'])
| 53.417004
| 167
| 0.719948
|
import boto3
import logging
import os
import ast
import json
import rubrik_cdm
from copy import deepcopy
import urllib3
urllib3.disable_warnings()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
arn = event['SecretId']
token = event['ClientRequestToken']
step = event['Step']
secret_service_client = boto3.client('secretsmanager')
metadata = secret_service_client.describe_secret(SecretId=arn)
if not metadata['RotationEnabled']:
logger.error("Secret %s is not enabled for rotation" % arn)
raise ValueError("Secret %s is not enabled for rotation" % arn)
versions = metadata['VersionIdsToStages']
if token not in versions:
logger.error("Secret version %s has no stage for rotation of secret %s." % (token, arn))
raise ValueError("Secret version %s has no stage for rotation of secret %s." % (token, arn))
if "AWSCURRENT" in versions[token]:
logger.info("Secret version %s already set as AWSCURRENT for secret %s." % (token, arn))
return
elif "AWSPENDING" not in versions[token]:
logger.error("Secret version %s not set as AWSPENDING for rotation of secret %s." % (token, arn))
raise ValueError("Secret version %s not set as AWSPENDING for rotation of secret %s." % (token, arn))
current_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSCURRENT")['SecretString'])
if current_secret['accountid'] == context.invoked_function_arn.split(":")[4]:
iam_service_client = boto3.client('iam')
# otherwise, attempt to assume a role into the target account
else:
iam_service_client = assume_role(role_arn=current_secret['rolearn'], session_name=current_secret['accountid']+'_session').client('iam')
if step == "createSecret":
create_secret(secret_service_client, arn, token, iam_service_client, current_secret)
elif step == "setSecret":
set_secret(secret_service_client, arn, token)
elif step == "testSecret":
test_secret(secret_service_client, arn, token)
elif step == "finishSecret":
finish_secret(secret_service_client, arn, token, iam_service_client)
else:
raise ValueError("Invalid step parameter")
def assume_role(role_arn=None, session_name='my_session'):
if role_arn:
client = boto3.client('sts')
response = client.assume_role(RoleArn=role_arn, RoleSessionName=session_name)
session = boto3.Session(
aws_access_key_id=response['Credentials']['AccessKeyId'],
aws_secret_access_key=response['Credentials']['SecretAccessKey'],
aws_session_token=response['Credentials']['SessionToken'])
return session
else:
return boto3.Session()
def create_secret(secret_service_client, arn, token, iam_service_client, current_secret):
# Make sure the current secret exists
secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSCURRENT")
# Now try to get the secret version, if that fails, put a new secret
try:
secret_service_client.get_secret_value(SecretId=arn, VersionId=token, VersionStage="AWSPENDING")
logger.info("createSecret: Successfully retrieved secret for %s." % arn)
except secret_service_client.exceptions.ResourceNotFoundException:
# Generate new IAM credentials for this secret, fail if too many keys already exist
if len(iam_service_client.list_access_keys(UserName=current_secret['iamuser'])['AccessKeyMetadata']) > 1:
logger.error("User %s has more than one access key definied, cannot rotate" % current_secret['iamuser'])
raise ValueError("User %s has more than one access key definied, cannot rotate" % current_secret['iamuser'])
else:
new_access_keys = iam_service_client.create_access_key(UserName=current_secret['iamuser'])
# Create new secret string
new_secret = deepcopy(current_secret)
new_secret['iamaccesskey'] = new_access_keys['AccessKey']['AccessKeyId']
new_secret['iamsecretkey'] = new_access_keys['AccessKey']['SecretAccessKey']
new_secret_json = json.dumps(new_secret)
# Put the secret
secret_service_client.put_secret_value(SecretId=arn, ClientRequestToken=token, SecretString=new_secret_json, VersionStages=['AWSPENDING'])
logger.info("createSecret: Successfully put secret for ARN %s and version %s." % (arn, token))
def set_secret(secret_service_client, arn, token):
# Retrieve secrets
current_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSCURRENT")['SecretString'])
pending_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSPENDING")['SecretString'])
rubrik_credentials = ast.literal_eval(secret_service_client.get_secret_value(SecretId='/rubrik/rubrik_cdm_credentials', VersionStage="AWSCURRENT")['SecretString'])
# connect to rubrik api
rubrik = rubrik_cdm.Connect(rubrik_credentials['rubrikhost'], rubrik_credentials['rubrikuser'], rubrik_credentials['rubrikpassword'])
# find cloud native source, generate config for update operation
cloud_sources = rubrik.get('internal', '/aws/account', timeout=15, authentication=True)['data']
    logger.info('attempting to get current cloud source detail from rubrik...')
    source_update_detail = None  # set once the matching cloud source is found
for source in cloud_sources:
source_detail = rubrik.get('internal', '/aws/account/'+source['id'], timeout=15, authentication=True)
logger.info('got cloud source detail for %s' % source['id'])
logger.info(source_detail)
logger.info('checking if source detail access key %s matches current access key %s' % (source_detail['accessKey'], current_secret['iamaccesskey']))
if source_detail['accessKey'] == current_secret['iamaccesskey']:
logger.info('found match!')
source_update_detail = deepcopy(source_detail)
source_update_detail['secretKey'] = pending_secret['iamsecretkey']
source_update_detail['accessKey'] = pending_secret['iamaccesskey']
details_to_remove = ('configuredSlaDomainName', 'primaryClusterId', 'id', 'configuredSlaDomainId')
for key in details_to_remove:
source_update_detail.pop(key, None)
else:
logger.info('no match found')
# if we found a matching Cloud Source, rotate the access key
if source_update_detail:
rubrik.update_aws_native_account(source_update_detail['name'], source_update_detail, timeout=30)
else:
logger.error("Could not find Cloud Native Source on Rubrik %s with access key %s" % (rubrik_credentials['rubrikhost'], current_secret['iamaccesskey']))
raise ValueError("Could not find Cloud Native Source on Rubrik %s with access key %s" % (rubrik_credentials['rubrikhost'], current_secret['iamaccesskey']))
def test_secret(secret_service_client, arn, token):
# retrieve pending secret
pending_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSPENDING")['SecretString'])
# connect to rubrik api
rubrik_credentials = ast.literal_eval(secret_service_client.get_secret_value(SecretId='/rubrik/rubrik_cdm_credentials', VersionStage="AWSCURRENT")['SecretString'])
rubrik = rubrik_cdm.Connect(rubrik_credentials['rubrikhost'], rubrik_credentials['rubrikuser'], rubrik_credentials['rubrikpassword'])
# find relevant cloud source
cloud_sources = rubrik.get('internal', '/aws/account', timeout=60, authentication=True)['data']
for source in cloud_sources:
source_detail = rubrik.get('internal', '/aws/account/'+source['id'], timeout=60, authentication=True)
if source_detail['accessKey'] == pending_secret['iamaccesskey']:
source_id = source_detail['id']
# check if the cloud source can iterate subnets in us-east-1
try:
rubrik.get('internal', '/aws/account/%s/subnet?region=us-east-1' % (source_id), timeout=60, authentication=True)
except:
logger.error("Error iterating subnets in us-east-1 for Cloud Source %s" % source_id)
raise ValueError("Error iterating subnets in us-east-1 for Cloud Source %s" % source_id)
logger.info("testSecret: Successfully tested %s with new access keys" % source_id)
def finish_secret(secret_service_client, arn, token, iam_service_client):
    # Get info about the deprecated access key for deletion
    deprecated_secret = ast.literal_eval(secret_service_client.get_secret_value(SecretId=arn, VersionStage="AWSCURRENT")['SecretString'])
# First describe the secret to get the current version
metadata = secret_service_client.describe_secret(SecretId=arn)
current_version = None
for version in metadata["VersionIdsToStages"]:
if "AWSCURRENT" in metadata["VersionIdsToStages"][version]:
if version == token:
# The correct version is already marked as current, return
logger.info("finishSecret: Version %s already marked as AWSCURRENT for %s" % (version, arn))
return
current_version = version
break
# Finalize by staging the secret version current
secret_service_client.update_secret_version_stage(SecretId=arn, VersionStage="AWSCURRENT", MoveToVersionId=token, RemoveFromVersionId=current_version)
logger.info("finishSecret: Successfully set AWSCURRENT stage to version %s for secret %s." % (version, arn))
# Delete the depricated access key
iam_service_client.delete_access_key(UserName=depricated_secret['iamuser'], AccessKeyId=depricated_secret['iamaccesskey'])
logger.info("Deleted depricated access key %s" % depricated_secret['iamaccesskey'])
| true
| true
|
1c4630ae0f50b4044900f2782a2b8d3bff5fdc1e
| 401
|
py
|
Python
|
task_manager_api/task_manager_api/urls.py
|
LsbProxy/task_manager_api
|
b014d74aa3cd5bc9952ac04548350d3a08836c8f
|
[
"MIT"
] | null | null | null |
task_manager_api/task_manager_api/urls.py
|
LsbProxy/task_manager_api
|
b014d74aa3cd5bc9952ac04548350d3a08836c8f
|
[
"MIT"
] | null | null | null |
task_manager_api/task_manager_api/urls.py
|
LsbProxy/task_manager_api
|
b014d74aa3cd5bc9952ac04548350d3a08836c8f
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('auth.urls')),
path('', include('api.urls')),
path('admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| 26.733333
| 60
| 0.680798
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('auth.urls')),
path('', include('api.urls')),
path('admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| true
| true
|
1c4631519443af09252e50a84ea2e878f561085d
| 20,551
|
py
|
Python
|
flux_combined_high_binding/model_857.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_combined_high_binding/model_857.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_combined_high_binding/model_857.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 100000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 170000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| 95.143519
| 798
| 0.804146
|
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 100000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 170000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| true
| true
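The record above defines a PySB apoptosis model (monomers, rate parameters, rules, observables and initial conditions). A minimal sketch of how such a model could be simulated — assuming PySB with its SciPy-based simulator is installed, and using the hypothetical module name apoptosis_model for the record's file, which is not stated in the record:
import numpy as np
from pysb.simulator import ScipyOdeSimulator

from apoptosis_model import model  # hypothetical module name for the record's file

# Integrate the ODEs over an illustrative 20,000 s window and read out the
# 'ParpC_obs' observable declared in the record (cleaved PARP over time).
tspan = np.linspace(0, 20000, 101)
result = ScipyOdeSimulator(model, tspan=tspan).run()
print(result.observables['ParpC_obs'][-1])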
|
1c4631a70ee71cb407b8c93a4400df836801fe55
| 1,365
|
py
|
Python
|
examples/cellular_example.py
|
timhunderwood/numpy-to-stl
|
eea305ae30bb4aa5882d7c66edebe76173da8b06
|
[
"MIT"
] | 1
|
2020-12-29T08:56:48.000Z
|
2020-12-29T08:56:48.000Z
|
examples/cellular_example.py
|
timhunderwood/numpy-to-stl
|
eea305ae30bb4aa5882d7c66edebe76173da8b06
|
[
"MIT"
] | null | null | null |
examples/cellular_example.py
|
timhunderwood/numpy-to-stl
|
eea305ae30bb4aa5882d7c66edebe76173da8b06
|
[
"MIT"
] | 1
|
2021-06-16T02:06:40.000Z
|
2021-06-16T02:06:40.000Z
|
import cellular
import numpy
import mpl_toolkits.mplot3d
import matplotlib.pyplot as plt
import numpy_to_stl
def get_simulated_world(cells_per_day, rule, number_of_days):
world = cellular.World(cells_per_day, rule, ones=False)
world.simulate(number_of_days)
world.display(landscape=True)
return numpy.vstack(world.state)
def create_mesh_of_world(
cells_per_day=100, rule=cellular.rules.rule_777, number_of_days=100
):
array = get_simulated_world(cells_per_day, rule, number_of_days)
return numpy_to_stl.create_surface_mesh_from_array(array, base_height=1)
def plot_stl_world(cells_per_day=100, rule=cellular.rules.rule_777, number_of_days=200):
world_mesh = create_mesh_of_world(cells_per_day, rule, number_of_days)
figure = plt.figure()
axes = mpl_toolkits.mplot3d.Axes3D(figure)
#
# # Load the STL files and add the vectors to the plot
axes.add_collection3d(
mpl_toolkits.mplot3d.art3d.Poly3DCollection(
world_mesh.vectors, facecolor="red", edgecolor="black"
)
)
# Auto scale to the mesh size
scale = world_mesh.points.flatten(-1)
axes.auto_scale_xyz(scale, scale, scale)
# Show the plot to the screen
plt.show()
world_mesh.save("small_cellular_example.stl")
if __name__ == "__main__":
plot_stl_world(cells_per_day=100, number_of_days=200)
| 29.673913
| 88
| 0.745788
|
import cellular
import numpy
import mpl_toolkits.mplot3d
import matplotlib.pyplot as plt
import numpy_to_stl
def get_simulated_world(cells_per_day, rule, number_of_days):
world = cellular.World(cells_per_day, rule, ones=False)
world.simulate(number_of_days)
world.display(landscape=True)
return numpy.vstack(world.state)
def create_mesh_of_world(
cells_per_day=100, rule=cellular.rules.rule_777, number_of_days=100
):
array = get_simulated_world(cells_per_day, rule, number_of_days)
return numpy_to_stl.create_surface_mesh_from_array(array, base_height=1)
def plot_stl_world(cells_per_day=100, rule=cellular.rules.rule_777, number_of_days=200):
world_mesh = create_mesh_of_world(cells_per_day, rule, number_of_days)
figure = plt.figure()
axes = mpl_toolkits.mplot3d.Axes3D(figure)
    axes.add_collection3d(
        mpl_toolkits.mplot3d.art3d.Poly3DCollection(
            world_mesh.vectors, facecolor="red", edgecolor="black"
        )
    )
scale = world_mesh.points.flatten(-1)
axes.auto_scale_xyz(scale, scale, scale)
plt.show()
world_mesh.save("small_cellular_example.stl")
if __name__ == "__main__":
plot_stl_world(cells_per_day=100, number_of_days=200)
| true
| true
|
1c463309478ab2730838c468b3402f7a8124d47e
| 3,752
|
py
|
Python
|
elasticlogger/hooks/elasticsearch/elasticsearch.py
|
danteay/elasticlogger
|
3182e3d1d34564a5e95aaef3c10239d162eb691a
|
[
"MIT"
] | 1
|
2021-06-27T10:17:16.000Z
|
2021-06-27T10:17:16.000Z
|
elasticlogger/hooks/elasticsearch/elasticsearch.py
|
danteay/elasticlogger
|
3182e3d1d34564a5e95aaef3c10239d162eb691a
|
[
"MIT"
] | 4
|
2021-06-29T19:41:39.000Z
|
2021-09-23T21:47:22.000Z
|
elasticlogger/hooks/elasticsearch/elasticsearch.py
|
danteay/elasticlogger
|
3182e3d1d34564a5e95aaef3c10239d162eb691a
|
[
"MIT"
] | 1
|
2022-03-14T18:27:42.000Z
|
2022-03-14T18:27:42.000Z
|
"""Elastic search hook function."""
import os
import re
from datetime import datetime
from logging import CRITICAL, DEBUG, ERROR, INFO, WARNING
from typing import Any, AnyStr, Dict, NoReturn, Optional
from elasticsearch import Elasticsearch
from elasticlogger import utils
from elasticlogger.hooks import HookContext
from elasticlogger.ports.elasticsearch import get_instance
from .errors import ESConfigurationError, ESEmptyIndexError, ESEmptyUrlError
class ElasticSearch:
"""Elastic Search hook implementation.
:type url: str
:param url: Elasticsearch cluster endpoint
:type index: str
    :param index: Elasticsearch index where the logs will be stored
:param **kwargs: All Elasticsearch object params
"""
def __init__(self, url: Optional[AnyStr] = None, index: Optional[AnyStr] = None, **kwargs: Dict[AnyStr, Any]):
self.__url: AnyStr = url if url else os.getenv('ELASTICSEARCH_URL', None)
self.__index: AnyStr = index if index else os.getenv('ELASTICSEARCH_INDEX', None)
self.__kwargs: Dict[AnyStr, Any] = kwargs
self.__client: Elasticsearch = self.__init_client()
def __call__(self, context: HookContext) -> NoReturn:
"""Main execution of the Elastic Search Hook.
:param context: Current log record context
"""
if not self.__check_level(context.level, context.logger_level):
return
document = {
"@timestamp": datetime.now(),
"@message": context.message,
"level": utils.get_level_name(context.level),
"name": context.logger_name,
}
document.update(context.extra_data)
document = self.__clean_metadata_keys(document)
self.__client.index(index=self.__index, body=document)
def __init_client(self) -> Elasticsearch:
"""Create new client instance to stream logs.
:return Elasticsearch: New client instance
"""
if self.__url is None:
raise ESEmptyUrlError('Empty Elasticsearch server.')
if self.__index is None:
raise ESEmptyIndexError('Empty Elasticsearch index.')
try:
return get_instance(self.__url, **self.__kwargs)
except Exception as error:
raise ESConfigurationError('Error creating Elasticsearch client instance') from error
@staticmethod
def __clean_metadata_keys(document: Dict[AnyStr, Any]) -> Dict[AnyStr, Any]:
"""Remove all keys of a document that start with underscore to not be confused with metadata keys
:param document: Full document data
        :return Dict[AnyStr, Any]: Cleaned document without metadata keys
"""
new_document = document.copy()
for key in document.keys():
if re.search("^_", key) is not None:
del new_document[key]
return new_document
@staticmethod
def __check_level(log_level: int, logger_level: int) -> bool:
"""Validate if the configured level and the given logs are valid to stream to Elasticsearch
:param log_level: current log level of the ES document
:param logger_level: Global logger level
:return bool: Boolean assertion
"""
if log_level == DEBUG and logger_level == DEBUG:
return True
if log_level == INFO and logger_level in {DEBUG, INFO}:
return True
if log_level == WARNING and logger_level in {DEBUG, INFO, WARNING}:
return True
if log_level == ERROR and logger_level in {DEBUG, INFO, WARNING, ERROR}:
return True
if log_level == CRITICAL and logger_level in {DEBUG, INFO, WARNING, ERROR, CRITICAL}:
return True
return False
| 32.068376
| 114
| 0.661247
|
import os
import re
from datetime import datetime
from logging import CRITICAL, DEBUG, ERROR, INFO, WARNING
from typing import Any, AnyStr, Dict, NoReturn, Optional
from elasticsearch import Elasticsearch
from elasticlogger import utils
from elasticlogger.hooks import HookContext
from elasticlogger.ports.elasticsearch import get_instance
from .errors import ESConfigurationError, ESEmptyIndexError, ESEmptyUrlError
class ElasticSearch:
def __init__(self, url: Optional[AnyStr] = None, index: Optional[AnyStr] = None, **kwargs: Dict[AnyStr, Any]):
self.__url: AnyStr = url if url else os.getenv('ELASTICSEARCH_URL', None)
self.__index: AnyStr = index if index else os.getenv('ELASTICSEARCH_INDEX', None)
self.__kwargs: Dict[AnyStr, Any] = kwargs
self.__client: Elasticsearch = self.__init_client()
def __call__(self, context: HookContext) -> NoReturn:
if not self.__check_level(context.level, context.logger_level):
return
document = {
"@timestamp": datetime.now(),
"@message": context.message,
"level": utils.get_level_name(context.level),
"name": context.logger_name,
}
document.update(context.extra_data)
document = self.__clean_metadata_keys(document)
self.__client.index(index=self.__index, body=document)
def __init_client(self) -> Elasticsearch:
if self.__url is None:
raise ESEmptyUrlError('Empty Elasticsearch server.')
if self.__index is None:
raise ESEmptyIndexError('Empty Elasticsearch index.')
try:
return get_instance(self.__url, **self.__kwargs)
except Exception as error:
raise ESConfigurationError('Error creating Elasticsearch client instance') from error
@staticmethod
def __clean_metadata_keys(document: Dict[AnyStr, Any]) -> Dict[AnyStr, Any]:
new_document = document.copy()
for key in document.keys():
if re.search("^_", key) is not None:
del new_document[key]
return new_document
@staticmethod
def __check_level(log_level: int, logger_level: int) -> bool:
if log_level == DEBUG and logger_level == DEBUG:
return True
if log_level == INFO and logger_level in {DEBUG, INFO}:
return True
if log_level == WARNING and logger_level in {DEBUG, INFO, WARNING}:
return True
if log_level == ERROR and logger_level in {DEBUG, INFO, WARNING, ERROR}:
return True
if log_level == CRITICAL and logger_level in {DEBUG, INFO, WARNING, ERROR, CRITICAL}:
return True
return False
| true
| true
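The record above implements the ElasticSearch hook: the constructor resolves the cluster URL and index from its arguments or from the ELASTICSEARCH_URL / ELASTICSEARCH_INDEX environment variables, and __call__ indexes a document only when the level passes __check_level. A minimal usage sketch, assuming the import path follows the record's file path and a local cluster — both assumptions, not stated in the record:
import os

from elasticlogger.hooks.elasticsearch.elasticsearch import ElasticSearch  # path inferred from the record

# Assumed endpoint and index; in practice these come from deployment config.
os.environ.setdefault("ELASTICSEARCH_URL", "http://localhost:9200")
os.environ.setdefault("ELASTICSEARCH_INDEX", "app-logs")

hook = ElasticSearch()  # falls back to the environment variables set above
# The owning logger later invokes `hook(context)` with a HookContext carrying
# the level, message, logger name and extra data, as shown in __call__ above.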
|
1c4633fe467d2a4c8b937c02025f2e49b2342f56
| 420
|
py
|
Python
|
instapics/forms.py
|
UMULISA12/Instagram_Ip
|
169c9326ef247c85808d9b7b8989c59740887615
|
[
"MIT"
] | null | null | null |
instapics/forms.py
|
UMULISA12/Instagram_Ip
|
169c9326ef247c85808d9b7b8989c59740887615
|
[
"MIT"
] | null | null | null |
instapics/forms.py
|
UMULISA12/Instagram_Ip
|
169c9326ef247c85808d9b7b8989c59740887615
|
[
"MIT"
] | null | null | null |
from .models import Image,Profile,Comment
from django import forms
class NewImageForm(forms.ModelForm):
class Meta:
model=Image
exclude=['profile','pub_date','name','likes','comments']
class NewProfileForm(forms.ModelForm):
class Meta:
model=Profile
exclude=['user']
class NewCommentForm(forms.ModelForm):
class Meta:
model = Comment
exclude = ['commenter']
| 24.705882
| 64
| 0.666667
|
from .models import Image,Profile,Comment
from django import forms
class NewImageForm(forms.ModelForm):
class Meta:
model=Image
exclude=['profile','pub_date','name','likes','comments']
class NewProfileForm(forms.ModelForm):
class Meta:
model=Profile
exclude=['user']
class NewCommentForm(forms.ModelForm):
class Meta:
model = Comment
exclude = ['commenter']
| true
| true
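The record above defines three ModelForms whose excluded fields (profile, commenter, likes, comments, pub_date) are meant to be filled in programmatically. A view-side sketch of how NewImageForm would typically be wired up; the view name, the URL name and the user-to-profile relation are assumptions, not part of the record:
from django.shortcuts import redirect

from instapics.forms import NewImageForm

def upload_image(request):
    # 'profile' is excluded from the form, so it is attached manually here.
    form = NewImageForm(request.POST, request.FILES)
    if form.is_valid():
        image = form.save(commit=False)
        image.profile = request.user.profile  # assumes a Profile linked to the user
        image.save()
    return redirect('index')  # hypothetical URL name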
|
1c4634872f7d494377366f5d864db3ecea175182
| 1,794
|
py
|
Python
|
dataset/dataset_test.py
|
Beta3-Data/FacialLandmark-Live-Training
|
10b2b464f1deb015a7f152bb14f120f0dc6f9de2
|
[
"MIT"
] | null | null | null |
dataset/dataset_test.py
|
Beta3-Data/FacialLandmark-Live-Training
|
10b2b464f1deb015a7f152bb14f120f0dc6f9de2
|
[
"MIT"
] | null | null | null |
dataset/dataset_test.py
|
Beta3-Data/FacialLandmark-Live-Training
|
10b2b464f1deb015a7f152bb14f120f0dc6f9de2
|
[
"MIT"
] | null | null | null |
from __future__ import print_function, division
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from FaceLandmarksDataset import FaceLandmarksDataset
from FaceLandmarksDataset import SmartRandomCrop
from FaceLandmarksDataset import Rescale
# Ignore warnings
def show_landmarks(image, landmarks):
"""Show image with landmarks"""
plt.imshow(image)
plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')
plt.pause(0.001) # pause a bit so that plots are updated
landmarks_frame = pd.read_csv('face_landmarks.csv')
n = 65
img_name = landmarks_frame.ix[n, 0]
landmarks = landmarks_frame.ix[n, 1:].as_matrix().astype('float')
landmarks = landmarks.reshape(-1, 2)
max_xy = np.max(landmarks,axis=0)
min_xy = np.min(landmarks,axis=0)
print(max_xy)
print(min_xy)
print('Image name: {}'.format(img_name))
print('Landmarks shape: {}'.format(landmarks.shape))
print('First 4 Landmarks: {}'.format(landmarks[:4]))
face_dataset = FaceLandmarksDataset(csv_file='face_landmarks.csv',
root_dir='data/image/')
fig = plt.figure()
crop = SmartRandomCrop()
scale = Rescale((256,256))
composed = transforms.Compose([SmartRandomCrop(),])
for i in range(len(face_dataset)):
sample = face_dataset[i]
sample = crop(sample)
sample = scale(sample)
print(i, sample['image'].shape, sample['landmarks'].shape)
ax = plt.subplot(1, 4, i + 1)
plt.tight_layout()
ax.set_title('Sample #{}'.format(i))
ax.axis('off')
show_landmarks(**sample)
if i == 3:
plt.show()
break
| 30.40678
| 75
| 0.682832
|
from __future__ import print_function, division
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from FaceLandmarksDataset import FaceLandmarksDataset
from FaceLandmarksDataset import SmartRandomCrop
from FaceLandmarksDataset import Rescale
def show_landmarks(image, landmarks):
plt.imshow(image)
plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')
plt.pause(0.001)
landmarks_frame = pd.read_csv('face_landmarks.csv')
n = 65
img_name = landmarks_frame.ix[n, 0]
landmarks = landmarks_frame.ix[n, 1:].as_matrix().astype('float')
landmarks = landmarks.reshape(-1, 2)
max_xy = np.max(landmarks,axis=0)
min_xy = np.min(landmarks,axis=0)
print(max_xy)
print(min_xy)
print('Image name: {}'.format(img_name))
print('Landmarks shape: {}'.format(landmarks.shape))
print('First 4 Landmarks: {}'.format(landmarks[:4]))
face_dataset = FaceLandmarksDataset(csv_file='face_landmarks.csv',
root_dir='data/image/')
fig = plt.figure()
crop = SmartRandomCrop()
scale = Rescale((256,256))
composed = transforms.Compose([SmartRandomCrop(),])
for i in range(len(face_dataset)):
sample = face_dataset[i]
sample = crop(sample)
sample = scale(sample)
print(i, sample['image'].shape, sample['landmarks'].shape)
ax = plt.subplot(1, 4, i + 1)
plt.tight_layout()
ax.set_title('Sample #{}'.format(i))
ax.axis('off')
show_landmarks(**sample)
if i == 3:
plt.show()
break
| true
| true
|
1c4634bf1a119368bd2b1ab2cfa1775e8ec4d0ce
| 9,856
|
py
|
Python
|
pyzoo/test/zoo/automl/model/test_Seq2Seq.py
|
Wesley-Du/analytics-zoo
|
e4ca11b219a43bceec99aba39cf30c8aa368e8b3
|
[
"Apache-2.0"
] | 35
|
2020-07-03T06:31:12.000Z
|
2020-07-12T08:38:10.000Z
|
pyzoo/test/zoo/automl/model/test_Seq2Seq.py
|
Angelina319/analytics-zoo
|
439f2c99d657fb20a5ff4bf510869616402ba0cf
|
[
"Apache-2.0"
] | 2
|
2018-10-31T01:20:05.000Z
|
2018-11-02T06:06:35.000Z
|
pyzoo/test/zoo/automl/model/test_Seq2Seq.py
|
Angelina319/analytics-zoo
|
439f2c99d657fb20a5ff4bf510869616402ba0cf
|
[
"Apache-2.0"
] | 4
|
2019-02-25T03:26:56.000Z
|
2019-03-06T04:41:31.000Z
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import shutil
import tempfile
import pytest
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.automl.model.Seq2Seq import *
from zoo.automl.feature.time_sequence import TimeSequenceFeatureTransformer
from numpy.testing import assert_array_almost_equal
class TestSeq2Seq(ZooTestCase):
def setup_method(self, method):
# super().setup_method(method)
self.train_data = pd.DataFrame(data=np.random.randn(64, 4))
self.val_data = pd.DataFrame(data=np.random.randn(16, 4))
self.test_data = pd.DataFrame(data=np.random.randn(16, 4))
self.past_seq_len = 6
self.future_seq_len_1 = 1
self.future_seq_len_2 = 2
# use roll method in time_sequence
self.feat = TimeSequenceFeatureTransformer()
self.config = {
'batch_size': 32,
'epochs': 1
}
self.model_1 = LSTMSeq2Seq(check_optional_config=False,
future_seq_len=self.future_seq_len_1)
self.model_2 = LSTMSeq2Seq(check_optional_config=False,
future_seq_len=self.future_seq_len_2)
self.fitted = False
self.predict_1 = None
self.predict_2 = None
def teardown_method(self, method):
pass
def test_fit_eval_1(self):
x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
print("fit_eval_future_seq_len_1:",
self.model_1.fit_eval(x_train_1, y_train_1, **self.config))
assert self.model_1.past_seq_len == 6
assert self.model_1.feature_num == 4
assert self.model_1.future_seq_len == 1
assert self.model_1.target_col_num == 1
def test_fit_eval_2(self):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
print("fit_eval_future_seq_len_2:",
self.model_2.fit_eval(x_train_2, y_train_2, **self.config))
assert self.model_2.future_seq_len == 2
self.fitted = True
def test_evaluate_1(self):
x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
x_val_1, y_val_1 = self.feat._roll_train(self.val_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
self.model_1.fit_eval(x_train_1, y_train_1, **self.config)
print("evaluate_future_seq_len_1:", self.model_1.evaluate(x_val_1,
y_val_1,
metric=['mse',
'r2']))
def test_evaluate_2(self):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
x_val_2, y_val_2 = self.feat._roll_train(self.val_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
self.model_2.fit_eval(x_train_2, y_train_2, **self.config)
print("evaluate_future_seq_len_2:", self.model_2.evaluate(x_val_2,
y_val_2,
metric=['mse',
'r2']))
def test_predict_1(self):
x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
x_test_1 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_1.fit_eval(x_train_1, y_train_1, **self.config)
predict_1 = self.model_1.predict(x_test_1)
assert predict_1.shape == (x_test_1.shape[0], self.future_seq_len_1)
def test_predict_2(self):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
x_test_2 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_2.fit_eval(x_train_2, y_train_2, **self.config)
predict_2 = self.model_2.predict(x_test_2)
assert predict_2.shape == (x_test_2.shape[0], self.future_seq_len_2)
def test_save_restore_1(self):
x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
x_test_1 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_1.fit_eval(x_train_1, y_train_1, **self.config)
predict_1_before = self.model_1.predict(x_test_1)
new_model_1 = LSTMSeq2Seq(check_optional_config=False)
dirname = tempfile.mkdtemp(prefix="automl_test_feature")
try:
save(dirname, model=self.model_1)
restore(dirname, model=new_model_1, config=self.config)
predict_1_after = new_model_1.predict(x_test_1)
assert_array_almost_equal(predict_1_before, predict_1_after, decimal=2), \
"Prediction values are not the same after restore: " \
"predict before is {}, and predict after is {}".format(predict_1_before,
predict_1_after)
new_config = {'epochs': 1}
new_model_1.fit_eval(x_train_1, y_train_1, **new_config)
finally:
shutil.rmtree(dirname)
def test_save_restore_2(self):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
x_test_2 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_2.fit_eval(x_train_2, y_train_2, **self.config)
predict_2_before = self.model_2.predict(x_test_2)
new_model_2 = LSTMSeq2Seq(check_optional_config=False)
dirname = tempfile.mkdtemp(prefix="automl_test_feature")
try:
save(dirname, model=self.model_2)
restore(dirname, model=new_model_2, config=self.config)
predict_2_after = new_model_2.predict(x_test_2)
assert_array_almost_equal(predict_2_before, predict_2_after, decimal=2), \
"Prediction values are not the same after restore: " \
"predict before is {}, and predict after is {}".format(predict_2_before,
predict_2_after)
new_config = {'epochs': 2}
new_model_2.fit_eval(x_train_2, y_train_2, **new_config)
finally:
shutil.rmtree(dirname)
def test_predict_with_uncertainty(self,):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
x_test_2 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_2.fit_eval(x_train_2, y_train_2, mc=True, **self.config)
prediction, uncertainty = self.model_2.predict_with_uncertainty(x_test_2, n_iter=2)
assert prediction.shape == (x_test_2.shape[0], self.future_seq_len_2)
assert uncertainty.shape == (x_test_2.shape[0], self.future_seq_len_2)
assert np.any(uncertainty)
new_model_2 = LSTMSeq2Seq(check_optional_config=False)
dirname = tempfile.mkdtemp(prefix="automl_test_feature")
try:
save(dirname, model=self.model_2)
restore(dirname, model=new_model_2, config=self.config)
prediction, uncertainty = new_model_2.predict_with_uncertainty(x_test_2, n_iter=2)
assert prediction.shape == (x_test_2.shape[0], self.future_seq_len_2)
assert uncertainty.shape == (x_test_2.shape[0], self.future_seq_len_2)
assert np.any(uncertainty)
finally:
shutil.rmtree(dirname)
if __name__ == '__main__':
pytest.main([__file__])
| 48.078049
| 94
| 0.581879
|
import shutil
import tempfile
import pytest
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.automl.model.Seq2Seq import *
from zoo.automl.feature.time_sequence import TimeSequenceFeatureTransformer
from numpy.testing import assert_array_almost_equal
class TestSeq2Seq(ZooTestCase):
def setup_method(self, method):
self.train_data = pd.DataFrame(data=np.random.randn(64, 4))
self.val_data = pd.DataFrame(data=np.random.randn(16, 4))
self.test_data = pd.DataFrame(data=np.random.randn(16, 4))
self.past_seq_len = 6
self.future_seq_len_1 = 1
self.future_seq_len_2 = 2
self.feat = TimeSequenceFeatureTransformer()
self.config = {
'batch_size': 32,
'epochs': 1
}
self.model_1 = LSTMSeq2Seq(check_optional_config=False,
future_seq_len=self.future_seq_len_1)
self.model_2 = LSTMSeq2Seq(check_optional_config=False,
future_seq_len=self.future_seq_len_2)
self.fitted = False
self.predict_1 = None
self.predict_2 = None
def teardown_method(self, method):
pass
def test_fit_eval_1(self):
x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
print("fit_eval_future_seq_len_1:",
self.model_1.fit_eval(x_train_1, y_train_1, **self.config))
assert self.model_1.past_seq_len == 6
assert self.model_1.feature_num == 4
assert self.model_1.future_seq_len == 1
assert self.model_1.target_col_num == 1
def test_fit_eval_2(self):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
print("fit_eval_future_seq_len_2:",
self.model_2.fit_eval(x_train_2, y_train_2, **self.config))
assert self.model_2.future_seq_len == 2
self.fitted = True
def test_evaluate_1(self):
x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
x_val_1, y_val_1 = self.feat._roll_train(self.val_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
self.model_1.fit_eval(x_train_1, y_train_1, **self.config)
print("evaluate_future_seq_len_1:", self.model_1.evaluate(x_val_1,
y_val_1,
metric=['mse',
'r2']))
def test_evaluate_2(self):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
x_val_2, y_val_2 = self.feat._roll_train(self.val_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
self.model_2.fit_eval(x_train_2, y_train_2, **self.config)
print("evaluate_future_seq_len_2:", self.model_2.evaluate(x_val_2,
y_val_2,
metric=['mse',
'r2']))
def test_predict_1(self):
x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
x_test_1 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_1.fit_eval(x_train_1, y_train_1, **self.config)
predict_1 = self.model_1.predict(x_test_1)
assert predict_1.shape == (x_test_1.shape[0], self.future_seq_len_1)
def test_predict_2(self):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
x_test_2 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_2.fit_eval(x_train_2, y_train_2, **self.config)
predict_2 = self.model_2.predict(x_test_2)
assert predict_2.shape == (x_test_2.shape[0], self.future_seq_len_2)
def test_save_restore_1(self):
x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_1)
x_test_1 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_1.fit_eval(x_train_1, y_train_1, **self.config)
predict_1_before = self.model_1.predict(x_test_1)
new_model_1 = LSTMSeq2Seq(check_optional_config=False)
dirname = tempfile.mkdtemp(prefix="automl_test_feature")
try:
save(dirname, model=self.model_1)
restore(dirname, model=new_model_1, config=self.config)
predict_1_after = new_model_1.predict(x_test_1)
assert_array_almost_equal(predict_1_before, predict_1_after, decimal=2), \
"Prediction values are not the same after restore: " \
"predict before is {}, and predict after is {}".format(predict_1_before,
predict_1_after)
new_config = {'epochs': 1}
new_model_1.fit_eval(x_train_1, y_train_1, **new_config)
finally:
shutil.rmtree(dirname)
def test_save_restore_2(self):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
x_test_2 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_2.fit_eval(x_train_2, y_train_2, **self.config)
predict_2_before = self.model_2.predict(x_test_2)
new_model_2 = LSTMSeq2Seq(check_optional_config=False)
dirname = tempfile.mkdtemp(prefix="automl_test_feature")
try:
save(dirname, model=self.model_2)
restore(dirname, model=new_model_2, config=self.config)
predict_2_after = new_model_2.predict(x_test_2)
assert_array_almost_equal(predict_2_before, predict_2_after, decimal=2), \
"Prediction values are not the same after restore: " \
"predict before is {}, and predict after is {}".format(predict_2_before,
predict_2_after)
new_config = {'epochs': 2}
new_model_2.fit_eval(x_train_2, y_train_2, **new_config)
finally:
shutil.rmtree(dirname)
def test_predict_with_uncertainty(self,):
x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
past_seq_len=self.past_seq_len,
future_seq_len=self.future_seq_len_2)
x_test_2 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
self.model_2.fit_eval(x_train_2, y_train_2, mc=True, **self.config)
prediction, uncertainty = self.model_2.predict_with_uncertainty(x_test_2, n_iter=2)
assert prediction.shape == (x_test_2.shape[0], self.future_seq_len_2)
assert uncertainty.shape == (x_test_2.shape[0], self.future_seq_len_2)
assert np.any(uncertainty)
new_model_2 = LSTMSeq2Seq(check_optional_config=False)
dirname = tempfile.mkdtemp(prefix="automl_test_feature")
try:
save(dirname, model=self.model_2)
restore(dirname, model=new_model_2, config=self.config)
prediction, uncertainty = new_model_2.predict_with_uncertainty(x_test_2, n_iter=2)
assert prediction.shape == (x_test_2.shape[0], self.future_seq_len_2)
assert uncertainty.shape == (x_test_2.shape[0], self.future_seq_len_2)
assert np.any(uncertainty)
finally:
shutil.rmtree(dirname)
if __name__ == '__main__':
pytest.main([__file__])
| true
| true
|
1c46378d907548f7177d7694871d9e0601053adf
| 61,104
|
py
|
Python
|
python/ccxt/bitfinex2.py
|
Jsn2win/ccxt
|
fff369de2192a3b7c71ab1d29d0923db8d5af913
|
[
"MIT"
] | null | null | null |
python/ccxt/bitfinex2.py
|
Jsn2win/ccxt
|
fff369de2192a3b7c71ab1d29d0923db8d5af913
|
[
"MIT"
] | null | null | null |
python/ccxt/bitfinex2.py
|
Jsn2win/ccxt
|
fff369de2192a3b7c71ab1d29d0923db8d5af913
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.bitfinex import bitfinex
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
class bitfinex2(bitfinex):
def describe(self):
return self.deep_extend(super(bitfinex2, self).describe(), {
'id': 'bitfinex2',
'name': 'Bitfinex',
'countries': ['VG'],
'version': 'v2',
'certified': False,
'pro': False,
# new metainfo interface
'has': {
'CORS': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createDepositAddress': True,
'createLimitOrder': True,
'createMarketOrder': True,
'createOrder': True,
'deposit': False,
'editOrder': False,
'fetchBalance': True,
'fetchClosedOrder': True,
'fetchClosedOrders': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchFundingFees': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrder': True,
'fetchOpenOrders': True,
'fetchOrder': False,
'fetchOrderTrades': True,
'fetchStatus': True,
'fetchTickers': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
'fetchTransactions': True,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'3h': '3h',
'4h': '4h',
'6h': '6h',
'12h': '12h',
'1d': '1D',
'1w': '7D',
'2w': '14D',
'1M': '1M',
},
'rateLimit': 1500,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766244-e328a50c-5ed2-11e7-947b-041416579bb3.jpg',
'api': {
'v1': 'https://api.bitfinex.com',
'public': 'https://api-pub.bitfinex.com',
'private': 'https://api.bitfinex.com',
},
'www': 'https://www.bitfinex.com',
'doc': [
'https://docs.bitfinex.com/v2/docs/',
'https://github.com/bitfinexcom/bitfinex-api-node',
],
'fees': 'https://www.bitfinex.com/fees',
},
'api': {
'v1': {
'get': [
'symbols',
'symbols_details',
],
},
'public': {
'get': [
'conf/{config}',
'conf/pub:{action}:{object}',
'conf/pub:{action}:{object}:{detail}',
'conf/pub:map:{object}',
'conf/pub:map:{object}:{detail}',
'conf/pub:map:currency:{detail}',
'conf/pub:map:currency:sym', # maps symbols to their API symbols, BAB > BCH
'conf/pub:map:currency:label', # verbose friendly names, BNT > Bancor
'conf/pub:map:currency:unit', # maps symbols to unit of measure where applicable
'conf/pub:map:currency:undl', # maps derivatives symbols to their underlying currency
'conf/pub:map:currency:pool', # maps symbols to underlying network/protocol they operate on
'conf/pub:map:currency:explorer', # maps symbols to their recognised block explorer URLs
'conf/pub:map:currency:tx:fee', # maps currencies to their withdrawal fees https://github.com/ccxt/ccxt/issues/7745
'conf/pub:map:tx:method',
'conf/pub:list:{object}',
'conf/pub:list:{object}:{detail}',
'conf/pub:list:currency',
'conf/pub:list:pair:exchange',
'conf/pub:list:pair:margin',
'conf/pub:list:pair:futures',
'conf/pub:list:competitions',
'conf/pub:info:{object}',
'conf/pub:info:{object}:{detail}',
'conf/pub:info:pair',
'conf/pub:info:tx:status', # [deposit, withdrawal] statuses 1 = active, 0 = maintenance
'conf/pub:fees',
'platform/status',
'tickers',
'ticker/{symbol}',
'trades/{symbol}/hist',
'book/{symbol}/{precision}',
'book/{symbol}/P0',
'book/{symbol}/P1',
'book/{symbol}/P2',
'book/{symbol}/P3',
'book/{symbol}/R0',
'stats1/{key}:{size}:{symbol}:{side}/{section}',
'stats1/{key}:{size}:{symbol}:{side}/last',
'stats1/{key}:{size}:{symbol}:{side}/hist',
'stats1/{key}:{size}:{symbol}/{section}',
'stats1/{key}:{size}:{symbol}/last',
'stats1/{key}:{size}:{symbol}/hist',
'stats1/{key}:{size}:{symbol}:long/last',
'stats1/{key}:{size}:{symbol}:long/hist',
'stats1/{key}:{size}:{symbol}:short/last',
'stats1/{key}:{size}:{symbol}:short/hist',
'candles/trade:{timeframe}:{symbol}/{section}',
'candles/trade:{timeframe}:{symbol}/last',
'candles/trade:{timeframe}:{symbol}/hist',
'status/{type}',
'status/deriv',
'liquidations/hist',
'rankings/{key}:{timeframe}:{symbol}/{section}',
'rankings/{key}:{timeframe}:{symbol}/hist',
],
'post': [
'calc/trade/avg',
'calc/fx',
],
},
'private': {
'post': [
# 'auth/r/orders/{symbol}/new', # outdated
# 'auth/r/stats/perf:{timeframe}/hist', # outdated
'auth/r/wallets',
'auth/r/wallets/hist',
'auth/r/orders',
'auth/r/orders/{symbol}',
'auth/w/order/submit',
'auth/w/order/update',
'auth/w/order/cancel',
'auth/w/order/multi',
'auth/w/order/cancel/multi',
'auth/r/orders/{symbol}/hist',
'auth/r/orders/hist',
'auth/r/order/{symbol}:{id}/trades',
'auth/r/trades/{symbol}/hist',
'auth/r/trades/hist',
'auth/r/ledgers/{currency}/hist',
'auth/r/ledgers/hist',
'auth/r/info/margin/{key}',
'auth/r/info/margin/base',
'auth/r/info/margin/sym_all',
'auth/r/positions',
'auth/w/position/claim',
'auth/r/positions/hist',
'auth/r/positions/audit',
'auth/r/positions/snap',
'auth/w/deriv/collateral/set',
'auth/w/deriv/collateral/limits',
'auth/r/funding/offers',
'auth/r/funding/offers/{symbol}',
'auth/w/funding/offer/submit',
'auth/w/funding/offer/cancel',
'auth/w/funding/offer/cancel/all',
'auth/w/funding/close',
'auth/w/funding/auto',
'auth/w/funding/keep',
'auth/r/funding/offers/{symbol}/hist',
'auth/r/funding/offers/hist',
'auth/r/funding/loans',
'auth/r/funding/loans/hist',
'auth/r/funding/loans/{symbol}',
'auth/r/funding/loans/{symbol}/hist',
'auth/r/funding/credits',
'auth/r/funding/credits/hist',
'auth/r/funding/credits/{symbol}',
'auth/r/funding/credits/{symbol}/hist',
'auth/r/funding/trades/{symbol}/hist',
'auth/r/funding/trades/hist',
'auth/r/info/funding/{key}',
'auth/r/info/user',
'auth/r/logins/hist',
'auth/w/transfer',
'auth/w/deposit/address',
'auth/w/deposit/invoice',
'auth/w/withdraw',
'auth/r/movements/{currency}/hist',
'auth/r/movements/hist',
'auth/r/alerts',
'auth/w/alert/set',
'auth/w/alert/price:{symbol}:{price}/del',
'auth/w/alert/{type}:{symbol}:{price}/del',
'auth/calc/order/avail',
'auth/w/settings/set',
'auth/r/settings',
'auth/w/settings/del',
],
},
},
'fees': {
'trading': {
'maker': 0.1 / 100,
'taker': 0.2 / 100,
},
'funding': {
'withdraw': {
'BTC': 0.0004,
'BCH': 0.0001,
'ETH': 0.00135,
'EOS': 0.0,
'LTC': 0.001,
'OMG': 0.15097,
'IOT': 0.0,
'NEO': 0.0,
'ETC': 0.01,
'XRP': 0.02,
'ETP': 0.01,
'ZEC': 0.001,
'BTG': 0.0,
'DASH': 0.01,
'XMR': 0.0001,
'QTM': 0.01,
'EDO': 0.23687,
'DAT': 9.8858,
'AVT': 1.1251,
'SAN': 0.35977,
'USDT': 5.0,
'SPK': 16.971,
'BAT': 1.1209,
'GNT': 2.8789,
'SNT': 9.0848,
'QASH': 1.726,
'YYW': 7.9464,
},
},
},
'options': {
'precision': 'R0', # P0, P1, P2, P3, P4, R0
# convert 'EXCHANGE MARKET' to lowercase 'market'
# convert 'EXCHANGE LIMIT' to lowercase 'limit'
# everything else remains uppercase
'exchangeTypes': {
# 'MARKET': None,
'EXCHANGE MARKET': 'market',
# 'LIMIT': None,
'EXCHANGE LIMIT': 'limit',
# 'STOP': None,
# 'EXCHANGE STOP': None,
# 'TRAILING STOP': None,
# 'EXCHANGE TRAILING STOP': None,
# 'FOK': None,
# 'EXCHANGE FOK': None,
# 'STOP LIMIT': None,
# 'EXCHANGE STOP LIMIT': None,
# 'IOC': None,
# 'EXCHANGE IOC': None,
},
# convert 'market' to 'EXCHANGE MARKET'
# convert 'limit' 'EXCHANGE LIMIT'
# everything else remains as is
'orderTypes': {
'market': 'EXCHANGE MARKET',
'limit': 'EXCHANGE LIMIT',
},
'fiat': {
'USD': 'USD',
'EUR': 'EUR',
'JPY': 'JPY',
'GBP': 'GBP',
},
},
'exceptions': {
'exact': {
'10020': BadRequest,
'10100': AuthenticationError,
'10114': InvalidNonce,
'20060': OnMaintenance,
},
'broad': {
'address': InvalidAddress,
'available balance is only': InsufficientFunds,
'not enough exchange balance': InsufficientFunds,
'Order not found': OrderNotFound,
'symbol: invalid': BadSymbol,
'Invalid order': InvalidOrder,
},
},
})
def is_fiat(self, code):
return(code in self.options['fiat'])
def get_currency_id(self, code):
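        # bitfinex v2 prefixes currency ids with 'f' (e.g. fUSD), mirroring the 't' prefix used for trading pairs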
return 'f' + code
def fetch_status(self, params={}):
#
# [1] # operative
# [0] # maintenance
#
response = self.publicGetPlatformStatus(params)
status = self.safe_value(response, 0)
formattedStatus = 'ok' if (status == 1) else 'maintenance'
self.status = self.extend(self.status, {
'status': formattedStatus,
'updated': self.milliseconds(),
})
return self.status
def fetch_markets(self, params={}):
# todo drop v1 in favor of v2 configs
# pub:list:pair:exchange,pub:list:pair:margin,pub:list:pair:futures,pub:info:pair
v2response = self.publicGetConfPubListPairFutures(params)
v1response = self.v1GetSymbolsDetails(params)
futuresMarketIds = self.safe_value(v2response, 0, [])
result = []
for i in range(0, len(v1response)):
market = v1response[i]
id = self.safe_string_upper(market, 'pair')
spot = True
if self.in_array(id, futuresMarketIds):
spot = False
futures = not spot
type = 'spot' if spot else 'futures'
baseId = None
quoteId = None
if id.find(':') >= 0:
parts = id.split(':')
baseId = parts[0]
quoteId = parts[1]
else:
baseId = id[0:3]
quoteId = id[3:6]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
id = 't' + id
baseId = self.get_currency_id(baseId)
quoteId = self.get_currency_id(quoteId)
precision = {
'price': self.safe_integer(market, 'price_precision'),
'amount': 8, # https://github.com/ccxt/ccxt/issues/7310
}
limits = {
'amount': {
'min': self.safe_float(market, 'minimum_order_size'),
'max': self.safe_float(market, 'maximum_order_size'),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
},
}
limits['cost'] = {
'min': limits['amount']['min'] * limits['price']['min'],
'max': None,
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': limits,
'info': market,
'type': type,
'swap': False,
'spot': spot,
'futures': futures,
})
return result
def fetch_currencies(self, params={}):
labels = [
'pub:list:currency',
'pub:map:currency:sym', # maps symbols to their API symbols, BAB > BCH
'pub:map:currency:label', # verbose friendly names, BNT > Bancor
'pub:map:currency:unit', # maps symbols to unit of measure where applicable
'pub:map:currency:undl', # maps derivatives symbols to their underlying currency
'pub:map:currency:pool', # maps symbols to underlying network/protocol they operate on
'pub:map:currency:explorer', # maps symbols to their recognised block explorer URLs
'pub:map:currency:tx:fee', # maps currencies to their withdrawal fees https://github.com/ccxt/ccxt/issues/7745
]
config = ','.join(labels)
request = {
'config': config,
}
response = self.publicGetConfConfig(self.extend(request, params))
#
# [
#
# a list of symbols
# ["AAA","ABS","ADA"],
#
# # sym
# # maps symbols to their API symbols, BAB > BCH
# [
# ['BAB', 'BCH'],
# ['CNHT', 'CNHt'],
# ['DSH', 'DASH'],
# ['IOT', 'IOTA'],
# ['LES', 'LEO-EOS'],
# ['LET', 'LEO-ERC20'],
# ['STJ', 'STORJ'],
# ['TSD', 'TUSD'],
# ['UDC', 'USDC'],
# ['USK', 'USDK'],
# ['UST', 'USDt'],
# ['USTF0', 'USDt0'],
# ['XCH', 'XCHF'],
# ['YYW', 'YOYOW'],
# # ...
# ],
# # label
# # verbose friendly names, BNT > Bancor
# [
# ['BAB', 'Bitcoin Cash'],
# ['BCH', 'Bitcoin Cash'],
# ['LEO', 'Unus Sed LEO'],
# ['LES', 'Unus Sed LEO(EOS)'],
# ['LET', 'Unus Sed LEO(ERC20)'],
# # ...
# ],
# # unit
# # maps symbols to unit of measure where applicable
# [
# ['IOT', 'Mi|MegaIOTA'],
# ],
# # undl
# # maps derivatives symbols to their underlying currency
# [
# ['USTF0', 'UST'],
# ['BTCF0', 'BTC'],
# ['ETHF0', 'ETH'],
# ],
# # pool
# # maps symbols to underlying network/protocol they operate on
# [
# ['SAN', 'ETH'], ['OMG', 'ETH'], ['AVT', 'ETH'], ['EDO', 'ETH'],
# ['ESS', 'ETH'], ['ATD', 'EOS'], ['ADD', 'EOS'], ['MTO', 'EOS'],
# ['PNK', 'ETH'], ['BAB', 'BCH'], ['WLO', 'XLM'], ['VLD', 'ETH'],
# ['BTT', 'TRX'], ['IMP', 'ETH'], ['SCR', 'ETH'], ['GNO', 'ETH'],
# # ...
# ],
# # explorer
# # maps symbols to their recognised block explorer URLs
# [
# [
# 'AIO',
# [
# "https://mainnet.aion.network",
# "https://mainnet.aion.network/#/account/VAL",
# "https://mainnet.aion.network/#/transaction/VAL"
# ]
# ],
# # ...
# ],
# # fee
# # maps currencies to their withdrawal fees
# [
# ["AAA",[0,0]],
# ["ABS",[0,131.3]],
# ["ADA",[0,0.3]],
# ],
# ]
#
indexed = {
'sym': self.index_by(self.safe_value(response, 1, []), 0),
'label': self.index_by(self.safe_value(response, 2, []), 0),
'unit': self.index_by(self.safe_value(response, 3, []), 0),
'undl': self.index_by(self.safe_value(response, 4, []), 0),
'pool': self.index_by(self.safe_value(response, 5, []), 0),
'explorer': self.index_by(self.safe_value(response, 6, []), 0),
'fees': self.index_by(self.safe_value(response, 7, []), 0),
}
ids = self.safe_value(response, 0, [])
result = {}
for i in range(0, len(ids)):
id = ids[i]
code = self.safe_currency_code(id)
label = self.safe_value(indexed['label'], id, [])
name = self.safe_string(label, 1)
pool = self.safe_value(indexed['pool'], id, [])
type = self.safe_string(pool, 1)
feeValues = self.safe_value(indexed['fees'], id, [])
fees = self.safe_value(feeValues, 1, [])
fee = self.safe_float(fees, 1)
precision = 8 # default precision, todo: fix "magic constants"
id = 'f' + id
result[code] = {
'id': id,
'code': code,
'info': [id, label, pool, feeValues],
'type': type,
'name': name,
'active': True,
'fee': fee,
'precision': precision,
'limits': {
'amount': {
'min': 1 / math.pow(10, precision),
'max': None,
},
'price': {
'min': 1 / math.pow(10, precision),
'max': None,
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': fee,
'max': None,
},
},
}
return result
def fetch_balance(self, params={}):
        # this API call does not return the 'used' amount - use the v1 version instead (which also returns zero balances)
self.load_markets()
response = self.privatePostAuthRWallets(params)
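        # the wallet to report is selected with params['type']: 'exchange' (default), 'margin' or 'funding'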
balanceType = self.safe_string(params, 'type', 'exchange')
result = {'info': response}
for b in range(0, len(response)):
balance = response[b]
accountType = balance[0]
currency = balance[1]
total = balance[2]
available = balance[4]
if accountType == balanceType:
if currency[0] == 't':
currency = currency[1:]
code = self.safe_currency_code(currency)
account = self.account()
# do not fill in zeroes and missing values in the parser
# rewrite and unify the following to use the unified parseBalance
account['total'] = total
if not available:
if available == 0:
account['free'] = 0
account['used'] = total
else:
account['free'] = total
else:
account['free'] = available
account['used'] = account['total'] - account['free']
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
precision = self.safe_value(self.options, 'precision', 'R0')
request = {
'symbol': self.market_id(symbol),
'precision': precision,
}
if limit is not None:
request['len'] = limit # 25 or 100
fullRequest = self.extend(request, params)
orderbook = self.publicGetBookSymbolPrecision(fullRequest)
timestamp = self.milliseconds()
result = {
'bids': [],
'asks': [],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'nonce': None,
}
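        # raw R0 books are [ORDER_ID, PRICE, AMOUNT] while aggregated P* books are [PRICE, COUNT, AMOUNT],
        # so the price sits at index 1 only for the R0 precision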
priceIndex = 1 if (fullRequest['precision'] == 'R0') else 0
for i in range(0, len(orderbook)):
order = orderbook[i]
price = order[priceIndex]
amount = abs(order[2])
side = 'bids' if (order[2] > 0) else 'asks'
result[side].append([price, amount])
result['bids'] = self.sort_by(result['bids'], 0, True)
result['asks'] = self.sort_by(result['asks'], 0)
return result
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market is not None:
symbol = market['symbol']
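        # v2 tickers are plain arrays; index fields from the end so the same parser handles
        # fetchTicker responses (no leading SYMBOL element) and fetchTickers rows (SYMBOL prepended)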
length = len(ticker)
last = ticker[length - 4]
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': ticker[length - 2],
'low': ticker[length - 1],
'bid': ticker[length - 10],
'bidVolume': None,
'ask': ticker[length - 8],
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': ticker[length - 6],
'percentage': ticker[length - 5] * 100,
'average': None,
'baseVolume': ticker[length - 3],
'quoteVolume': None,
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
request = {}
if symbols is not None:
ids = self.market_ids(symbols)
request['symbols'] = ','.join(ids)
else:
request['symbols'] = 'ALL'
tickers = self.publicGetTickers(self.extend(request, params))
result = {}
for i in range(0, len(tickers)):
ticker = tickers[i]
id = ticker[0]
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
result[symbol] = self.parse_ticker(ticker, market)
return self.filter_by_array(result, 'symbol', symbols)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
ticker = self.publicGetTickerSymbol(self.extend(request, params))
return self.parse_ticker(ticker, market)
def parse_symbol(self, marketId):
if marketId is None:
return marketId
marketId = marketId.replace('t', '')
baseId = None
quoteId = None
if marketId.find(':') >= 0:
parts = marketId.split(':')
baseId = parts[0]
quoteId = parts[1]
else:
baseId = marketId[0:3]
quoteId = marketId[3:6]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
return base + '/' + quote
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# [
# ID,
# MTS, # timestamp
# AMOUNT,
# PRICE
# ]
#
# fetchMyTrades(private)
#
# [
# ID,
# PAIR,
# MTS_CREATE,
# ORDER_ID,
# EXEC_AMOUNT,
# EXEC_PRICE,
# ORDER_TYPE,
# ORDER_PRICE,
# MAKER,
# FEE,
# FEE_CURRENCY,
# ...
# ]
#
tradeLength = len(trade)
isPrivate = (tradeLength > 5)
id = str(trade[0])
amountIndex = 4 if isPrivate else 2
amount = trade[amountIndex]
cost = None
priceIndex = 5 if isPrivate else 3
price = trade[priceIndex]
side = None
orderId = None
takerOrMaker = None
type = None
fee = None
symbol = None
timestampIndex = 2 if isPrivate else 1
timestamp = trade[timestampIndex]
if isPrivate:
marketId = trade[1]
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
symbol = self.parse_symbol(marketId)
orderId = str(trade[3])
takerOrMaker = 'maker' if (trade[8] == 1) else 'taker'
feeCost = trade[9]
feeCurrency = self.safe_currency_code(trade[10])
if feeCost is not None:
feeCost = -feeCost
if symbol in self.markets:
feeCost = self.fee_to_precision(symbol, feeCost)
else:
currencyId = 'f' + feeCurrency
if currencyId in self.currencies_by_id:
currency = self.currencies_by_id[currencyId]
feeCost = self.currency_to_precision(currency['code'], feeCost)
fee = {
'cost': float(feeCost),
'currency': feeCurrency,
}
orderType = trade[6]
type = self.safe_string(self.options['exchangeTypes'], orderType)
if symbol is None:
if market is not None:
symbol = market['symbol']
if amount is not None:
side = 'sell' if (amount < 0) else 'buy'
amount = abs(amount)
if cost is None:
if price is not None:
cost = amount * price
return {
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'side': side,
'type': type,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
'info': trade,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
sort = '-1'
request = {
'symbol': market['id'],
}
if since is not None:
request['start'] = since
sort = '1'
if limit is not None:
request['limit'] = limit # default 120, max 5000
request['sort'] = sort
response = self.publicGetTradesSymbolHist(self.extend(request, params))
#
# [
# [
# ID,
# MTS, # timestamp
# AMOUNT,
# PRICE
# ]
# ]
#
trades = self.sort_by(response, 1)
return self.parse_trades(trades, market, None, limit)
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=100, params={}):
self.load_markets()
market = self.market(symbol)
if limit is None:
limit = 100 # default 100, max 5000
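        # when no since is given, go back limit * timeframe from now so the most recent candles are returned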
if since is None:
since = self.milliseconds() - self.parse_timeframe(timeframe) * limit * 1000
request = {
'symbol': market['id'],
'timeframe': self.timeframes[timeframe],
'sort': 1,
'start': since,
'limit': limit,
}
response = self.publicGetCandlesTradeTimeframeSymbolHist(self.extend(request, params))
#
# [
# [1591503840000,0.025069,0.025068,0.025069,0.025068,1.97828998],
# [1591504500000,0.025065,0.025065,0.025065,0.025065,1.0164],
# [1591504620000,0.025062,0.025062,0.025062,0.025062,0.5],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_order_status(self, status):
if status is None:
return status
parts = status.split(' ')
state = self.safe_string(parts, 0)
statuses = {
'ACTIVE': 'open',
'PARTIALLY': 'open',
'EXECUTED': 'closed',
'CANCELED': 'canceled',
'INSUFFICIENT': 'canceled',
'RSN_DUST': 'rejected',
'RSN_PAUSE': 'rejected',
}
return self.safe_string(statuses, state, status)
def parse_order(self, order, market=None):
id = self.safe_string(order, 0)
symbol = None
marketId = self.safe_string(order, 3)
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
symbol = self.parse_symbol(marketId)
if (symbol is None) and (market is not None):
symbol = market['symbol']
# https://github.com/ccxt/ccxt/issues/6686
# timestamp = self.safe_timestamp(order, 5)
timestamp = self.safe_integer(order, 5)
remaining = abs(self.safe_float(order, 6))
amount = abs(self.safe_float(order, 7))
filled = amount - remaining
side = 'sell' if (order[7] < 0) else 'buy'
orderType = self.safe_string(order, 8)
type = self.safe_string(self.safe_value(self.options, 'exchangeTypes'), orderType)
status = None
statusString = self.safe_string(order, 13)
if statusString is not None:
parts = statusString.split(' @ ')
status = self.parse_order_status(self.safe_string(parts, 0))
price = self.safe_float(order, 16)
average = self.safe_float(order, 17)
cost = price * filled
clientOrderId = self.safe_string(order, 2)
return {
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
'trades': None,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
orderTypes = self.safe_value(self.options, 'orderTypes', {})
orderType = self.safe_string_upper(orderTypes, type, type)
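        # the v2 API encodes the side in the sign of the amount: negative amounts are sells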
amount = -amount if (side == 'sell') else amount
request = {
'symbol': market['id'],
'type': orderType,
'amount': self.number_to_string(amount),
}
if (orderType == 'LIMIT') or (orderType == 'EXCHANGE LIMIT'):
request['price'] = self.number_to_string(price)
elif (orderType == 'STOP') or (orderType == 'EXCHANGE STOP'):
stopPrice = self.safe_float(params, 'stopPrice', price)
request['price'] = self.number_to_string(stopPrice)
elif (orderType == 'STOP LIMIT') or (orderType == 'EXCHANGE STOP LIMIT'):
priceAuxLimit = self.safe_float(params, 'price_aux_limit')
stopPrice = self.safe_float(params, 'stopPrice')
if priceAuxLimit is None:
if stopPrice is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter or a price_aux_limit parameter for a ' + orderType + ' order')
else:
request['price_aux_limit'] = self.number_to_string(price)
else:
request['price_aux_limit'] = self.number_to_string(priceAuxLimit)
if stopPrice is None:
stopPrice = price
request['price'] = self.number_to_string(stopPrice)
elif (orderType == 'TRAILING STOP') or (orderType == 'EXCHANGE TRAILING STOP'):
priceTrailing = self.safe_float(params, 'price_trailing')
request['price_trailing'] = self.number_to_string(priceTrailing)
stopPrice = self.safe_float(params, 'stopPrice', price)
request['price'] = self.number_to_string(stopPrice)
elif (orderType == 'FOK') or (orderType == 'EXCHANGE FOK') or (orderType == 'IOC') or (orderType == 'EXCHANGE IOC'):
request['price'] = self.number_to_string(price)
params = self.omit(params, ['stopPrice', 'price_aux_limit', 'price_trailing'])
clientOrderId = self.safe_value_2(params, 'cid', 'clientOrderId')
if clientOrderId is not None:
request['cid'] = clientOrderId
params = self.omit(params, ['cid', 'clientOrderId'])
response = self.privatePostAuthWOrderSubmit(self.extend(request, params))
#
# [
# 1578784364.748, # Millisecond Time Stamp of the update
# "on-req", # Purpose of notification('on-req', 'oc-req', 'uca', 'fon-req', 'foc-req')
# null, # Unique ID of the message
# null, # Ignore
# [
# [
# 37271830598, # Order ID
# null, # Group ID
# 1578784364748, # Client Order ID
# "tBTCUST", # Pair
# 1578784364748, # Millisecond timestamp of creation
# 1578784364748, # Millisecond timestamp of update
# -0.005, # Positive means buy, negative means sell
# -0.005, # Original amount
# "EXCHANGE LIMIT", # Order type(LIMIT, MARKET, STOP, TRAILING STOP, EXCHANGE MARKET, EXCHANGE LIMIT, EXCHANGE STOP, EXCHANGE TRAILING STOP, FOK, EXCHANGE FOK, IOC, EXCHANGE IOC)
# null, # Previous order type
# null, # Millisecond timestamp of Time-In-Force: automatic order cancellation
# null, # Ignore
# 0, # Flags(see https://docs.bitfinex.com/docs/flag-values)
# "ACTIVE", # Order Status
# null, # Ignore
# null, # Ignore
# 20000, # Price
# 0, # Average price
# 0, # The trailing price
# 0, # Auxiliary Limit price(for STOP LIMIT)
# null, # Ignore
# null, # Ignore
# null, # Ignore
# 0, # 1 - hidden order
# null, # If another order caused self order to be placed(OCO) self will be that other order's ID
# null, # Ignore
# null, # Ignore
# null, # Ignore
# "API>BFX", # Origin of action: BFX, ETHFX, API>BFX, API>ETHFX
# null, # Ignore
# null, # Ignore
# null # Meta
# ]
# ],
# null, # Error code
# "SUCCESS", # Status(SUCCESS, ERROR, FAILURE, ...)
# "Submitting 1 orders." # Text of the notification
# ]
#
status = self.safe_string(response, 6)
if status != 'SUCCESS':
            errorCode = self.number_to_string(response[5])
            errorText = response[7]
            raise ExchangeError(self.id + ' ' + response[6] + ': ' + errorText + '(#' + errorCode + ')')
orders = self.safe_value(response, 4, [])
order = self.safe_value(orders, 0)
return self.parse_order(order, market)
def cancel_all_orders(self, symbol=None, params={}):
request = {
'all': 1,
}
response = self.privatePostAuthWOrderCancelMulti(self.extend(request, params))
orders = self.safe_value(response, 4, [])
return self.parse_orders(orders)
def cancel_order(self, id, symbol=None, params={}):
cid = self.safe_value_2(params, 'cid', 'clientOrderId') # client order id
request = None
if cid is not None:
cidDate = self.safe_value(params, 'cidDate') # client order id date
if cidDate is None:
raise InvalidOrder(self.id + " canceling an order by clientOrderId('cid') requires both 'cid' and 'cid_date'('YYYY-MM-DD')")
request = {
'cid': cid,
'cid_date': cidDate,
}
params = self.omit(params, ['cid', 'clientOrderId'])
else:
request = {
'id': int(id),
}
response = self.privatePostAuthWOrderCancel(self.extend(request, params))
order = self.safe_value(response, 4)
return self.parse_order(order)
def fetch_open_order(self, id, symbol=None, params={}):
request = {
'id': [int(id)],
}
orders = self.fetch_open_orders(symbol, None, None, self.extend(request, params))
order = self.safe_value(orders, 0)
if order is None:
raise OrderNotFound(self.id + ' order ' + id + ' not found')
return order
def fetch_closed_order(self, id, symbol=None, params={}):
request = {
'id': [int(id)],
}
orders = self.fetch_closed_orders(symbol, None, None, self.extend(request, params))
order = self.safe_value(orders, 0)
if order is None:
raise OrderNotFound(self.id + ' order ' + id + ' not found')
return order
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
market = None
response = None
if symbol is None:
response = self.privatePostAuthROrders(self.extend(request, params))
else:
market = self.market(symbol)
request['symbol'] = market['id']
response = self.privatePostAuthROrdersSymbol(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
# returns the most recent closed or canceled orders up to circa two weeks ago
self.load_markets()
request = {}
market = None
response = None
if symbol is None:
response = self.privatePostAuthROrdersHist(self.extend(request, params))
else:
market = self.market(symbol)
request['symbol'] = market['id']
response = self.privatePostAuthROrdersSymbolHist(self.extend(request, params))
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 25, max 2500
return self.parse_orders(response, market, since, limit)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrderTrades() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
orderId = int(id)
request = {
'id': orderId,
'symbol': market['id'],
}
        # valid for trades up to 10 days old
response = self.privatePostAuthROrderSymbolIdTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {
'end': self.milliseconds(),
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 25, max 1000
method = 'privatePostAuthRTradesHist'
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
method = 'privatePostAuthRTradesSymbolHist'
response = getattr(self, method)(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def create_deposit_address(self, code, params={}):
self.load_markets()
request = {
'op_renew': 1,
}
response = self.fetch_deposit_address(code, self.extend(request, params))
return response
def fetch_deposit_address(self, code, params={}):
self.load_markets()
# todo rewrite for https://api-pub.bitfinex.com//v2/conf/pub:map:tx:method
name = self.getCurrencyName(code)
request = {
'method': name,
'wallet': 'exchange', # 'exchange', 'margin', 'funding' and also old labels 'exchange', 'trading', 'deposit', respectively
'op_renew': 0, # a value of 1 will generate a new address
}
response = self.privatePostAuthWDepositAddress(self.extend(request, params))
#
# [
# 1582269616687, # MTS Millisecond Time Stamp of the update
# 'acc_dep', # TYPE Purpose of notification 'acc_dep' for account deposit
# null, # MESSAGE_ID unique ID of the message
# null, # not documented
# [
# null, # PLACEHOLDER
# 'BITCOIN', # METHOD Method of deposit
# 'BTC', # CURRENCY_CODE Currency code of new address
# null, # PLACEHOLDER
# '1BC9PZqpUmjyEB54uggn8TFKj49zSDYzqG', # ADDRESS
# null, # POOL_ADDRESS
# ],
# null, # CODE null or integer work in progress
# 'SUCCESS', # STATUS Status of the notification, SUCCESS, ERROR, FAILURE
# 'success', # TEXT Text of the notification
# ]
#
result = self.safe_value(response, 4, [])
poolAddress = self.safe_string(result, 5)
address = self.safe_string(result, 4) if (poolAddress is None) else poolAddress
tag = None if (poolAddress is None) else self.safe_string(result, 4)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': response,
}
def parse_transaction_status(self, status):
statuses = {
'SUCCESS': 'ok',
'ERROR': 'failed',
'FAILURE': 'failed',
'CANCELED': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# [
# 1582271520931, # MTS Millisecond Time Stamp of the update
# "acc_wd-req", # TYPE Purpose of notification 'acc_wd-req' account withdrawal request
# null, # MESSAGE_ID unique ID of the message
# null, # not documented
# [
# 0, # WITHDRAWAL_ID Unique Withdrawal ID
# null, # PLACEHOLDER
# "bitcoin", # METHOD Method of withdrawal
# null, # PAYMENT_ID Payment ID if relevant
# "exchange", # WALLET Sending wallet
# 1, # AMOUNT Amount of Withdrawal less fee
# null, # PLACEHOLDER
# null, # PLACEHOLDER
# 0.0004, # WITHDRAWAL_FEE Fee on withdrawal
# ],
# null, # CODE null or integer Work in progress
# "SUCCESS", # STATUS Status of the notification, it may vary over time SUCCESS, ERROR, FAILURE
# "Invalid bitcoin address(abcdef)", # TEXT Text of the notification
# ]
#
# fetchTransactions
#
# [
# 13293039, # ID
# 'ETH', # CURRENCY
# 'ETHEREUM', # CURRENCY_NAME
# null,
# null,
# 1574175052000, # MTS_STARTED
# 1574181326000, # MTS_UPDATED
# null,
# null,
# 'CANCELED', # STATUS
# null,
# null,
# -0.24, # AMOUNT, negative for withdrawals
# -0.00135, # FEES
# null,
# null,
# 'DESTINATION_ADDRESS',
# null,
# null,
# null,
# 'TRANSACTION_ID',
# "Purchase of 100 pizzas", # WITHDRAW_TRANSACTION_NOTE
# ]
#
transactionLength = len(transaction)
timestamp = None
updated = None
code = None
amount = None
id = None
status = None
tag = None
type = None
feeCost = None
txid = None
addressTo = None
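        # short arrays come from the withdraw() notification format, longer ones from the movements history endpoint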
if transactionLength < 9:
data = self.safe_value(transaction, 4, [])
timestamp = self.safe_integer(transaction, 0)
if currency is not None:
code = currency['code']
feeCost = self.safe_float(data, 8)
if feeCost is not None:
feeCost = -feeCost
amount = self.safe_float(data, 5)
id = self.safe_value(data, 0)
status = 'ok'
if id == 0:
id = None
status = 'failed'
tag = self.safe_string(data, 3)
type = 'withdrawal'
else:
id = self.safe_string(transaction, 0)
timestamp = self.safe_integer(transaction, 5)
updated = self.safe_integer(transaction, 6)
status = self.parse_transaction_status(self.safe_string(transaction, 9))
amount = self.safe_float(transaction, 12)
if amount is not None:
if amount < 0:
type = 'withdrawal'
else:
type = 'deposit'
feeCost = self.safe_float(transaction, 13)
if feeCost is not None:
feeCost = -feeCost
addressTo = self.safe_string(transaction, 16)
txid = self.safe_string(transaction, 20)
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
            'address': addressTo,  # this is actually the tag for XRP transfers (the address is missing)
'addressTo': addressTo,
'tagFrom': None,
'tag': tag, # refix it properly for the tag from description
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': {
'currency': code,
'cost': feeCost,
'rate': None,
},
}
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
self.load_markets()
currency = None
request = {}
method = 'privatePostAuthRMovementsHist'
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method = 'privatePostAuthRMovementsCurrencyHist'
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # max 1000
response = getattr(self, method)(self.extend(request, params))
#
# [
# [
# 13293039, # ID
# 'ETH', # CURRENCY
# 'ETHEREUM', # CURRENCY_NAME
# null,
# null,
# 1574175052000, # MTS_STARTED
# 1574181326000, # MTS_UPDATED
# null,
# null,
# 'CANCELED', # STATUS
# null,
# null,
# -0.24, # AMOUNT, negative for withdrawals
# -0.00135, # FEES
# null,
# null,
# 'DESTINATION_ADDRESS',
# null,
# null,
# null,
# 'TRANSACTION_ID',
# "Purchase of 100 pizzas", # WITHDRAW_TRANSACTION_NOTE
# ]
# ]
#
return self.parse_transactions(response, currency, since, limit)
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
# todo rewrite for https://api-pub.bitfinex.com//v2/conf/pub:map:tx:method
name = self.getCurrencyName(code)
request = {
'method': name,
'wallet': 'exchange', # 'exchange', 'margin', 'funding' and also old labels 'exchange', 'trading', 'deposit', respectively
'amount': self.number_to_string(amount),
'address': address,
}
if tag is not None:
request['payment_id'] = tag
response = self.privatePostAuthWWithdraw(self.extend(request, params))
#
# [
# 1582271520931, # MTS Millisecond Time Stamp of the update
# "acc_wd-req", # TYPE Purpose of notification 'acc_wd-req' account withdrawal request
# null, # MESSAGE_ID unique ID of the message
# null, # not documented
# [
# 0, # WITHDRAWAL_ID Unique Withdrawal ID
# null, # PLACEHOLDER
# "bitcoin", # METHOD Method of withdrawal
# null, # PAYMENT_ID Payment ID if relevant
# "exchange", # WALLET Sending wallet
# 1, # AMOUNT Amount of Withdrawal less fee
# null, # PLACEHOLDER
# null, # PLACEHOLDER
# 0.0004, # WITHDRAWAL_FEE Fee on withdrawal
# ],
# null, # CODE null or integer Work in progress
# "SUCCESS", # STATUS Status of the notification, it may vary over time SUCCESS, ERROR, FAILURE
# "Invalid bitcoin address(abcdef)", # TEXT Text of the notification
# ]
#
text = self.safe_string(response, 7)
if text != 'success':
self.throw_broadly_matched_exception(self.exceptions['broad'], text, text)
transaction = self.parse_transaction(response, currency)
return self.extend(transaction, {
'address': address,
})
def fetch_positions(self, symbols=None, since=None, limit=None, params={}):
self.load_markets()
        response = self.privatePostAuthRPositions(params)
#
# [
# [
# "tBTCUSD", # SYMBOL
# "ACTIVE", # STATUS
# 0.0195, # AMOUNT
# 8565.0267019, # BASE_PRICE
# 0, # MARGIN_FUNDING
# 0, # MARGIN_FUNDING_TYPE
# -0.33455568705000516, # PL
# -0.0003117550117425625, # PL_PERC
# 7045.876419249083, # PRICE_LIQ
# 3.0673001895895604, # LEVERAGE
# null, # _PLACEHOLDER
# 142355652, # POSITION_ID
# 1574002216000, # MTS_CREATE
# 1574002216000, # MTS_UPDATE
# null, # _PLACEHOLDER
# 0, # TYPE
# null, # _PLACEHOLDER
# 0, # COLLATERAL
# 0, # COLLATERAL_MIN
# # META
# {
# "reason":"TRADE",
# "order_id":34271018124,
# "liq_stage":null,
# "trade_price":"8565.0267019",
# "trade_amount":"0.0195",
# "order_id_oppo":34277498022
# }
# ]
# ]
#
# todo unify parsePosition/parsePositions
return response
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'v1':
request = api + request
else:
request = self.version + request
url = self.urls['api'][api] + '/' + request
if api == 'public':
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
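            # v2 authentication: HMAC-SHA384 of '/api/' + path + nonce + JSON body, sent in the bfx-signature header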
nonce = str(self.nonce())
body = self.json(query)
auth = '/api/' + request + nonce + body
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha384)
headers = {
'bfx-nonce': nonce,
'bfx-apikey': self.apiKey,
'bfx-signature': signature,
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if response:
if 'message' in response:
if response['message'].find('not enough exchange balance') >= 0:
raise InsufficientFunds(self.id + ' ' + self.json(response))
raise ExchangeError(self.id + ' ' + self.json(response))
return response
elif response == '':
raise ExchangeError(self.id + ' returned empty response')
return response
def handle_errors(self, statusCode, statusText, url, method, responseHeaders, responseBody, response, requestHeaders, requestBody):
if statusCode == 500:
# See https://docs.bitfinex.com/docs/abbreviations-glossary#section-errorinfo-codes
errorCode = self.number_to_string(response[1])
errorText = response[2]
feedback = self.id + ' ' + errorText
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorText, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], errorText, feedback)
raise ExchangeError(self.id + ' ' + errorText + '(#' + errorCode + ')')
avg_line_length: 41.539089 | max_line_length: 207 | alphanum_fraction: 0.45961
from ccxt.bitfinex import bitfinex
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
class bitfinex2(bitfinex):
def describe(self):
return self.deep_extend(super(bitfinex2, self).describe(), {
'id': 'bitfinex2',
'name': 'Bitfinex',
'countries': ['VG'],
'version': 'v2',
'certified': False,
'pro': False,
'has': {
'CORS': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createDepositAddress': True,
'createLimitOrder': True,
'createMarketOrder': True,
'createOrder': True,
'deposit': False,
'editOrder': False,
'fetchBalance': True,
'fetchClosedOrder': True,
'fetchClosedOrders': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchFundingFees': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrder': True,
'fetchOpenOrders': True,
'fetchOrder': False,
'fetchOrderTrades': True,
'fetchStatus': True,
'fetchTickers': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
'fetchTransactions': True,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'3h': '3h',
'4h': '4h',
'6h': '6h',
'12h': '12h',
'1d': '1D',
'1w': '7D',
'2w': '14D',
'1M': '1M',
},
'rateLimit': 1500,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766244-e328a50c-5ed2-11e7-947b-041416579bb3.jpg',
'api': {
'v1': 'https://api.bitfinex.com',
'public': 'https://api-pub.bitfinex.com',
'private': 'https://api.bitfinex.com',
},
'www': 'https://www.bitfinex.com',
'doc': [
'https://docs.bitfinex.com/v2/docs/',
'https://github.com/bitfinexcom/bitfinex-api-node',
],
'fees': 'https://www.bitfinex.com/fees',
},
'api': {
'v1': {
'get': [
'symbols',
'symbols_details',
],
},
'public': {
'get': [
'conf/{config}',
'conf/pub:{action}:{object}',
'conf/pub:{action}:{object}:{detail}',
'conf/pub:map:{object}',
'conf/pub:map:{object}:{detail}',
'conf/pub:map:currency:{detail}',
'conf/pub:map:currency:sym',
'conf/pub:map:currency:label',
'conf/pub:map:currency:unit',
'conf/pub:map:currency:undl',
'conf/pub:map:currency:pool',
'conf/pub:map:currency:explorer',
'conf/pub:map:currency:tx:fee',
'conf/pub:map:tx:method',
'conf/pub:list:{object}',
'conf/pub:list:{object}:{detail}',
'conf/pub:list:currency',
'conf/pub:list:pair:exchange',
'conf/pub:list:pair:margin',
'conf/pub:list:pair:futures',
'conf/pub:list:competitions',
'conf/pub:info:{object}',
'conf/pub:info:{object}:{detail}',
'conf/pub:info:pair',
'conf/pub:info:tx:status',
'conf/pub:fees',
'platform/status',
'tickers',
'ticker/{symbol}',
'trades/{symbol}/hist',
'book/{symbol}/{precision}',
'book/{symbol}/P0',
'book/{symbol}/P1',
'book/{symbol}/P2',
'book/{symbol}/P3',
'book/{symbol}/R0',
'stats1/{key}:{size}:{symbol}:{side}/{section}',
'stats1/{key}:{size}:{symbol}:{side}/last',
'stats1/{key}:{size}:{symbol}:{side}/hist',
'stats1/{key}:{size}:{symbol}/{section}',
'stats1/{key}:{size}:{symbol}/last',
'stats1/{key}:{size}:{symbol}/hist',
'stats1/{key}:{size}:{symbol}:long/last',
'stats1/{key}:{size}:{symbol}:long/hist',
'stats1/{key}:{size}:{symbol}:short/last',
'stats1/{key}:{size}:{symbol}:short/hist',
'candles/trade:{timeframe}:{symbol}/{section}',
'candles/trade:{timeframe}:{symbol}/last',
'candles/trade:{timeframe}:{symbol}/hist',
'status/{type}',
'status/deriv',
'liquidations/hist',
'rankings/{key}:{timeframe}:{symbol}/{section}',
'rankings/{key}:{timeframe}:{symbol}/hist',
],
'post': [
'calc/trade/avg',
'calc/fx',
],
},
'private': {
'post': [
'auth/r/wallets',
'auth/r/wallets/hist',
'auth/r/orders',
'auth/r/orders/{symbol}',
'auth/w/order/submit',
'auth/w/order/update',
'auth/w/order/cancel',
'auth/w/order/multi',
'auth/w/order/cancel/multi',
'auth/r/orders/{symbol}/hist',
'auth/r/orders/hist',
'auth/r/order/{symbol}:{id}/trades',
'auth/r/trades/{symbol}/hist',
'auth/r/trades/hist',
'auth/r/ledgers/{currency}/hist',
'auth/r/ledgers/hist',
'auth/r/info/margin/{key}',
'auth/r/info/margin/base',
'auth/r/info/margin/sym_all',
'auth/r/positions',
'auth/w/position/claim',
'auth/r/positions/hist',
'auth/r/positions/audit',
'auth/r/positions/snap',
'auth/w/deriv/collateral/set',
'auth/w/deriv/collateral/limits',
'auth/r/funding/offers',
'auth/r/funding/offers/{symbol}',
'auth/w/funding/offer/submit',
'auth/w/funding/offer/cancel',
'auth/w/funding/offer/cancel/all',
'auth/w/funding/close',
'auth/w/funding/auto',
'auth/w/funding/keep',
'auth/r/funding/offers/{symbol}/hist',
'auth/r/funding/offers/hist',
'auth/r/funding/loans',
'auth/r/funding/loans/hist',
'auth/r/funding/loans/{symbol}',
'auth/r/funding/loans/{symbol}/hist',
'auth/r/funding/credits',
'auth/r/funding/credits/hist',
'auth/r/funding/credits/{symbol}',
'auth/r/funding/credits/{symbol}/hist',
'auth/r/funding/trades/{symbol}/hist',
'auth/r/funding/trades/hist',
'auth/r/info/funding/{key}',
'auth/r/info/user',
'auth/r/logins/hist',
'auth/w/transfer',
'auth/w/deposit/address',
'auth/w/deposit/invoice',
'auth/w/withdraw',
'auth/r/movements/{currency}/hist',
'auth/r/movements/hist',
'auth/r/alerts',
'auth/w/alert/set',
'auth/w/alert/price:{symbol}:{price}/del',
'auth/w/alert/{type}:{symbol}:{price}/del',
'auth/calc/order/avail',
'auth/w/settings/set',
'auth/r/settings',
'auth/w/settings/del',
],
},
},
'fees': {
'trading': {
'maker': 0.1 / 100,
'taker': 0.2 / 100,
},
'funding': {
'withdraw': {
'BTC': 0.0004,
'BCH': 0.0001,
'ETH': 0.00135,
'EOS': 0.0,
'LTC': 0.001,
'OMG': 0.15097,
'IOT': 0.0,
'NEO': 0.0,
'ETC': 0.01,
'XRP': 0.02,
'ETP': 0.01,
'ZEC': 0.001,
'BTG': 0.0,
'DASH': 0.01,
'XMR': 0.0001,
'QTM': 0.01,
'EDO': 0.23687,
'DAT': 9.8858,
'AVT': 1.1251,
'SAN': 0.35977,
'USDT': 5.0,
'SPK': 16.971,
'BAT': 1.1209,
'GNT': 2.8789,
'SNT': 9.0848,
'QASH': 1.726,
'YYW': 7.9464,
},
},
},
'options': {
'precision': 'R0',
'exchangeTypes': {
'EXCHANGE MARKET': 'market',
'EXCHANGE LIMIT': 'limit',
},
'orderTypes': {
'market': 'EXCHANGE MARKET',
'limit': 'EXCHANGE LIMIT',
},
'fiat': {
'USD': 'USD',
'EUR': 'EUR',
'JPY': 'JPY',
'GBP': 'GBP',
},
},
'exceptions': {
'exact': {
'10020': BadRequest,
'10100': AuthenticationError,
'10114': InvalidNonce,
'20060': OnMaintenance,
},
'broad': {
'address': InvalidAddress,
'available balance is only': InsufficientFunds,
'not enough exchange balance': InsufficientFunds,
'Order not found': OrderNotFound,
'symbol: invalid': BadSymbol,
'Invalid order': InvalidOrder,
},
},
})
def is_fiat(self, code):
return(code in self.options['fiat'])
def get_currency_id(self, code):
return 'f' + code
def fetch_status(self, params={}):
response = self.publicGetPlatformStatus(params)
status = self.safe_value(response, 0)
formattedStatus = 'ok' if (status == 1) else 'maintenance'
self.status = self.extend(self.status, {
'status': formattedStatus,
'updated': self.milliseconds(),
})
return self.status
def fetch_markets(self, params={}):
v2response = self.publicGetConfPubListPairFutures(params)
v1response = self.v1GetSymbolsDetails(params)
futuresMarketIds = self.safe_value(v2response, 0, [])
result = []
for i in range(0, len(v1response)):
market = v1response[i]
id = self.safe_string_upper(market, 'pair')
spot = True
if self.in_array(id, futuresMarketIds):
spot = False
futures = not spot
type = 'spot' if spot else 'futures'
baseId = None
quoteId = None
if id.find(':') >= 0:
parts = id.split(':')
baseId = parts[0]
quoteId = parts[1]
else:
baseId = id[0:3]
quoteId = id[3:6]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
id = 't' + id
baseId = self.get_currency_id(baseId)
quoteId = self.get_currency_id(quoteId)
precision = {
'price': self.safe_integer(market, 'price_precision'),
'amount': 8,
}
limits = {
'amount': {
'min': self.safe_float(market, 'minimum_order_size'),
'max': self.safe_float(market, 'maximum_order_size'),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
},
}
limits['cost'] = {
'min': limits['amount']['min'] * limits['price']['min'],
'max': None,
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': limits,
'info': market,
'type': type,
'swap': False,
'spot': spot,
'futures': futures,
})
return result
def fetch_currencies(self, params={}):
labels = [
'pub:list:currency',
'pub:map:currency:sym',
'pub:map:currency:label',
'pub:map:currency:unit',
'pub:map:currency:undl',
'pub:map:currency:pool',
'pub:map:currency:explorer',
'pub:map:currency:tx:fee',
]
config = ','.join(labels)
request = {
'config': config,
}
response = self.publicGetConfConfig(self.extend(request, params))
indexed = {
'sym': self.index_by(self.safe_value(response, 1, []), 0),
'label': self.index_by(self.safe_value(response, 2, []), 0),
'unit': self.index_by(self.safe_value(response, 3, []), 0),
'undl': self.index_by(self.safe_value(response, 4, []), 0),
'pool': self.index_by(self.safe_value(response, 5, []), 0),
'explorer': self.index_by(self.safe_value(response, 6, []), 0),
'fees': self.index_by(self.safe_value(response, 7, []), 0),
}
ids = self.safe_value(response, 0, [])
result = {}
for i in range(0, len(ids)):
id = ids[i]
code = self.safe_currency_code(id)
label = self.safe_value(indexed['label'], id, [])
name = self.safe_string(label, 1)
pool = self.safe_value(indexed['pool'], id, [])
type = self.safe_string(pool, 1)
feeValues = self.safe_value(indexed['fees'], id, [])
fees = self.safe_value(feeValues, 1, [])
fee = self.safe_float(fees, 1)
precision = 8
id = 'f' + id
result[code] = {
'id': id,
'code': code,
'info': [id, label, pool, feeValues],
'type': type,
'name': name,
'active': True,
'fee': fee,
'precision': precision,
'limits': {
'amount': {
'min': 1 / math.pow(10, precision),
'max': None,
},
'price': {
'min': 1 / math.pow(10, precision),
'max': None,
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': fee,
'max': None,
},
},
}
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostAuthRWallets(params)
balanceType = self.safe_string(params, 'type', 'exchange')
result = {'info': response}
for b in range(0, len(response)):
balance = response[b]
accountType = balance[0]
currency = balance[1]
total = balance[2]
available = balance[4]
if accountType == balanceType:
if currency[0] == 't':
currency = currency[1:]
code = self.safe_currency_code(currency)
account = self.account()
account['total'] = total
if not available:
if available == 0:
account['free'] = 0
account['used'] = total
else:
account['free'] = total
else:
account['free'] = available
account['used'] = account['total'] - account['free']
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
precision = self.safe_value(self.options, 'precision', 'R0')
request = {
'symbol': self.market_id(symbol),
'precision': precision,
}
if limit is not None:
request['len'] = limit
fullRequest = self.extend(request, params)
orderbook = self.publicGetBookSymbolPrecision(fullRequest)
timestamp = self.milliseconds()
result = {
'bids': [],
'asks': [],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'nonce': None,
}
priceIndex = 1 if (fullRequest['precision'] == 'R0') else 0
for i in range(0, len(orderbook)):
order = orderbook[i]
price = order[priceIndex]
amount = abs(order[2])
side = 'bids' if (order[2] > 0) else 'asks'
result[side].append([price, amount])
result['bids'] = self.sort_by(result['bids'], 0, True)
result['asks'] = self.sort_by(result['asks'], 0)
return result
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market is not None:
symbol = market['symbol']
length = len(ticker)
last = ticker[length - 4]
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': ticker[length - 2],
'low': ticker[length - 1],
'bid': ticker[length - 10],
'bidVolume': None,
'ask': ticker[length - 8],
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': ticker[length - 6],
'percentage': ticker[length - 5] * 100,
'average': None,
'baseVolume': ticker[length - 3],
'quoteVolume': None,
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
request = {}
if symbols is not None:
ids = self.market_ids(symbols)
request['symbols'] = ','.join(ids)
else:
request['symbols'] = 'ALL'
tickers = self.publicGetTickers(self.extend(request, params))
result = {}
for i in range(0, len(tickers)):
ticker = tickers[i]
id = ticker[0]
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
result[symbol] = self.parse_ticker(ticker, market)
return self.filter_by_array(result, 'symbol', symbols)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
ticker = self.publicGetTickerSymbol(self.extend(request, params))
return self.parse_ticker(ticker, market)
def parse_symbol(self, marketId):
if marketId is None:
return marketId
marketId = marketId.replace('t', '')
baseId = None
quoteId = None
if marketId.find(':') >= 0:
parts = marketId.split(':')
baseId = parts[0]
quoteId = parts[1]
else:
baseId = marketId[0:3]
quoteId = marketId[3:6]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
return base + '/' + quote
def parse_trade(self, trade, market=None):
tradeLength = len(trade)
isPrivate = (tradeLength > 5)
id = str(trade[0])
amountIndex = 4 if isPrivate else 2
amount = trade[amountIndex]
cost = None
priceIndex = 5 if isPrivate else 3
price = trade[priceIndex]
side = None
orderId = None
takerOrMaker = None
type = None
fee = None
symbol = None
timestampIndex = 2 if isPrivate else 1
timestamp = trade[timestampIndex]
if isPrivate:
marketId = trade[1]
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
symbol = self.parse_symbol(marketId)
orderId = str(trade[3])
takerOrMaker = 'maker' if (trade[8] == 1) else 'taker'
feeCost = trade[9]
feeCurrency = self.safe_currency_code(trade[10])
if feeCost is not None:
feeCost = -feeCost
if symbol in self.markets:
feeCost = self.fee_to_precision(symbol, feeCost)
else:
currencyId = 'f' + feeCurrency
if currencyId in self.currencies_by_id:
currency = self.currencies_by_id[currencyId]
feeCost = self.currency_to_precision(currency['code'], feeCost)
fee = {
'cost': float(feeCost),
'currency': feeCurrency,
}
orderType = trade[6]
type = self.safe_string(self.options['exchangeTypes'], orderType)
if symbol is None:
if market is not None:
symbol = market['symbol']
if amount is not None:
side = 'sell' if (amount < 0) else 'buy'
amount = abs(amount)
if cost is None:
if price is not None:
cost = amount * price
return {
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'side': side,
'type': type,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
'info': trade,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
sort = '-1'
request = {
'symbol': market['id'],
}
if since is not None:
request['start'] = since
sort = '1'
if limit is not None:
request['limit'] = limit
request['sort'] = sort
response = self.publicGetTradesSymbolHist(self.extend(request, params))
trades = self.sort_by(response, 1)
return self.parse_trades(trades, market, None, limit)
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=100, params={}):
self.load_markets()
market = self.market(symbol)
if limit is None:
limit = 100
if since is None:
since = self.milliseconds() - self.parse_timeframe(timeframe) * limit * 1000
request = {
'symbol': market['id'],
'timeframe': self.timeframes[timeframe],
'sort': 1,
'start': since,
'limit': limit,
}
response = self.publicGetCandlesTradeTimeframeSymbolHist(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_order_status(self, status):
if status is None:
return status
parts = status.split(' ')
state = self.safe_string(parts, 0)
statuses = {
'ACTIVE': 'open',
'PARTIALLY': 'open',
'EXECUTED': 'closed',
'CANCELED': 'canceled',
'INSUFFICIENT': 'canceled',
'RSN_DUST': 'rejected',
'RSN_PAUSE': 'rejected',
}
return self.safe_string(statuses, state, status)
def parse_order(self, order, market=None):
id = self.safe_string(order, 0)
symbol = None
marketId = self.safe_string(order, 3)
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
symbol = self.parse_symbol(marketId)
if (symbol is None) and (market is not None):
symbol = market['symbol']
timestamp = self.safe_integer(order, 5)
remaining = abs(self.safe_float(order, 6))
amount = abs(self.safe_float(order, 7))
filled = amount - remaining
side = 'sell' if (order[7] < 0) else 'buy'
orderType = self.safe_string(order, 8)
type = self.safe_string(self.safe_value(self.options, 'exchangeTypes'), orderType)
status = None
statusString = self.safe_string(order, 13)
if statusString is not None:
parts = statusString.split(' @ ')
status = self.parse_order_status(self.safe_string(parts, 0))
price = self.safe_float(order, 16)
average = self.safe_float(order, 17)
cost = price * filled
clientOrderId = self.safe_string(order, 2)
return {
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
'trades': None,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
orderTypes = self.safe_value(self.options, 'orderTypes', {})
orderType = self.safe_string_upper(orderTypes, type, type)
amount = -amount if (side == 'sell') else amount
request = {
'symbol': market['id'],
'type': orderType,
'amount': self.number_to_string(amount),
}
if (orderType == 'LIMIT') or (orderType == 'EXCHANGE LIMIT'):
request['price'] = self.number_to_string(price)
elif (orderType == 'STOP') or (orderType == 'EXCHANGE STOP'):
stopPrice = self.safe_float(params, 'stopPrice', price)
request['price'] = self.number_to_string(stopPrice)
elif (orderType == 'STOP LIMIT') or (orderType == 'EXCHANGE STOP LIMIT'):
priceAuxLimit = self.safe_float(params, 'price_aux_limit')
stopPrice = self.safe_float(params, 'stopPrice')
if priceAuxLimit is None:
if stopPrice is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter or a price_aux_limit parameter for a ' + orderType + ' order')
else:
request['price_aux_limit'] = self.number_to_string(price)
else:
request['price_aux_limit'] = self.number_to_string(priceAuxLimit)
if stopPrice is None:
stopPrice = price
request['price'] = self.number_to_string(stopPrice)
elif (orderType == 'TRAILING STOP') or (orderType == 'EXCHANGE TRAILING STOP'):
priceTrailing = self.safe_float(params, 'price_trailing')
request['price_trailing'] = self.number_to_string(priceTrailing)
stopPrice = self.safe_float(params, 'stopPrice', price)
request['price'] = self.number_to_string(stopPrice)
elif (orderType == 'FOK') or (orderType == 'EXCHANGE FOK') or (orderType == 'IOC') or (orderType == 'EXCHANGE IOC'):
request['price'] = self.number_to_string(price)
params = self.omit(params, ['stopPrice', 'price_aux_limit', 'price_trailing'])
clientOrderId = self.safe_value_2(params, 'cid', 'clientOrderId')
if clientOrderId is not None:
request['cid'] = clientOrderId
params = self.omit(params, ['cid', 'clientOrderId'])
response = self.privatePostAuthWOrderSubmit(self.extend(request, params))
#
# [
#     ...                     # MTS, TYPE, MESSAGE_ID, the submitted orders and CODE(fields elided here)
#     "SUCCESS",              # STATUS Status of the notification(SUCCESS, ERROR, FAILURE, ...)
# "Submitting 1 orders." # Text of the notification
# ]
#
status = self.safe_string(response, 6)
if status != 'SUCCESS':
errorCode = response[5]
errorText = response[7]
raise ExchangeError(self.id + ' ' + response[6] + ': ' + errorText + ' (#' + str(errorCode) + ')')
orders = self.safe_value(response, 4, [])
order = self.safe_value(orders, 0)
return self.parse_order(order, market)
def cancel_all_orders(self, symbol=None, params={}):
request = {
'all': 1,
}
response = self.privatePostAuthWOrderCancelMulti(self.extend(request, params))
orders = self.safe_value(response, 4, [])
return self.parse_orders(orders)
def cancel_order(self, id, symbol=None, params={}):
cid = self.safe_value_2(params, 'cid', 'clientOrderId') # client order id
request = None
if cid is not None:
cidDate = self.safe_value(params, 'cidDate') # client order id date
if cidDate is None:
raise InvalidOrder(self.id + " canceling an order by clientOrderId('cid') requires both 'cid' and 'cid_date'('YYYY-MM-DD')")
request = {
'cid': cid,
'cid_date': cidDate,
}
params = self.omit(params, ['cid', 'clientOrderId'])
else:
request = {
'id': int(id),
}
response = self.privatePostAuthWOrderCancel(self.extend(request, params))
order = self.safe_value(response, 4)
return self.parse_order(order)
def fetch_open_order(self, id, symbol=None, params={}):
request = {
'id': [int(id)],
}
orders = self.fetch_open_orders(symbol, None, None, self.extend(request, params))
order = self.safe_value(orders, 0)
if order is None:
raise OrderNotFound(self.id + ' order ' + id + ' not found')
return order
def fetch_closed_order(self, id, symbol=None, params={}):
request = {
'id': [int(id)],
}
orders = self.fetch_closed_orders(symbol, None, None, self.extend(request, params))
order = self.safe_value(orders, 0)
if order is None:
raise OrderNotFound(self.id + ' order ' + id + ' not found')
return order
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
market = None
response = None
if symbol is None:
response = self.privatePostAuthROrders(self.extend(request, params))
else:
market = self.market(symbol)
request['symbol'] = market['id']
response = self.privatePostAuthROrdersSymbol(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
# returns the most recent closed or canceled orders up to circa two weeks ago
self.load_markets()
request = {}
market = None
response = None
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 25, max 2500
if symbol is None:
response = self.privatePostAuthROrdersHist(self.extend(request, params))
else:
market = self.market(symbol)
request['symbol'] = market['id']
response = self.privatePostAuthROrdersSymbolHist(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrderTrades() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
orderId = int(id)
request = {
'id': orderId,
'symbol': market['id'],
}
# valid for trades up to 10 days old
response = self.privatePostAuthROrderSymbolIdTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {
'end': self.milliseconds(),
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 25, max 1000
method = 'privatePostAuthRTradesHist'
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
method = 'privatePostAuthRTradesSymbolHist'
response = getattr(self, method)(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def create_deposit_address(self, code, params={}):
self.load_markets()
request = {
'op_renew': 1,
}
response = self.fetch_deposit_address(code, self.extend(request, params))
return response
def fetch_deposit_address(self, code, params={}):
self.load_markets()
# todo rewrite for https://api-pub.bitfinex.com//v2/conf/pub:map:tx:method
name = self.getCurrencyName(code)
request = {
'method': name,
'wallet': 'exchange', # 'exchange', 'margin', 'funding' and also old labels 'exchange', 'trading', 'deposit', respectively
'op_renew': 0, # a value of 1 will generate a new address
}
response = self.privatePostAuthWDepositAddress(self.extend(request, params))
#
# [
# 1582269616687, # MTS Millisecond Time Stamp of the update
# 'acc_dep', # TYPE Purpose of notification 'acc_dep' for account deposit
# null, # MESSAGE_ID unique ID of the message
# null, # not documented
# [
# null, # PLACEHOLDER
# 'BITCOIN', # METHOD Method of deposit
# 'BTC', # CURRENCY_CODE Currency code of new address
# null, # PLACEHOLDER
# '1BC9PZqpUmjyEB54uggn8TFKj49zSDYzqG', # ADDRESS
# null, # POOL_ADDRESS
# ],
# null, # CODE null or integer work in progress
# 'SUCCESS', # STATUS Status of the notification, SUCCESS, ERROR, FAILURE
# 'success', # TEXT Text of the notification
# ]
#
result = self.safe_value(response, 4, [])
poolAddress = self.safe_string(result, 5)
address = self.safe_string(result, 4) if (poolAddress is None) else poolAddress
tag = None if (poolAddress is None) else self.safe_string(result, 4)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': response,
}
def parse_transaction_status(self, status):
statuses = {
'SUCCESS': 'ok',
'ERROR': 'failed',
'FAILURE': 'failed',
'CANCELED': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# [
# 1582271520931, # MTS Millisecond Time Stamp of the update
# "acc_wd-req", # TYPE Purpose of notification 'acc_wd-req' account withdrawal request
# null, # MESSAGE_ID unique ID of the message
# null, # not documented
# [
# 0, # WITHDRAWAL_ID Unique Withdrawal ID
# null, # PLACEHOLDER
# "bitcoin", # METHOD Method of withdrawal
# null, # PAYMENT_ID Payment ID if relevant
# "exchange", # WALLET Sending wallet
# 1, # AMOUNT Amount of Withdrawal less fee
# null, # PLACEHOLDER
# null, # PLACEHOLDER
# 0.0004, # WITHDRAWAL_FEE Fee on withdrawal
# ],
# null, # CODE null or integer Work in progress
# "SUCCESS", # STATUS Status of the notification, it may vary over time SUCCESS, ERROR, FAILURE
# "Invalid bitcoin address(abcdef)", # TEXT Text of the notification
# ]
#
# fetchTransactions
#
# [
# 13293039, # ID
# 'ETH', # CURRENCY
# 'ETHEREUM', # CURRENCY_NAME
# null,
# null,
# 1574175052000, # MTS_STARTED
# 1574181326000, # MTS_UPDATED
# null,
# null,
# 'CANCELED', # STATUS
# null,
# null,
# -0.24, # AMOUNT, negative for withdrawals
# -0.00135, # FEES
# null,
# null,
# 'DESTINATION_ADDRESS',
# null,
# null,
# null,
# 'TRANSACTION_ID',
# "Purchase of 100 pizzas", # WITHDRAW_TRANSACTION_NOTE
# ]
#
transactionLength = len(transaction)
timestamp = None
updated = None
code = None
amount = None
id = None
status = None
tag = None
type = None
feeCost = None
txid = None
addressTo = None
if transactionLength < 9:
data = self.safe_value(transaction, 4, [])
timestamp = self.safe_integer(transaction, 0)
if currency is not None:
code = currency['code']
feeCost = self.safe_float(data, 8)
if feeCost is not None:
feeCost = -feeCost
amount = self.safe_float(data, 5)
id = self.safe_value(data, 0)
status = 'ok'
if id == 0:
id = None
status = 'failed'
tag = self.safe_string(data, 3)
type = 'withdrawal'
else:
id = self.safe_string(transaction, 0)
timestamp = self.safe_integer(transaction, 5)
updated = self.safe_integer(transaction, 6)
status = self.parse_transaction_status(self.safe_string(transaction, 9))
amount = self.safe_float(transaction, 12)
if amount is not None:
if amount < 0:
type = 'withdrawal'
else:
type = 'deposit'
feeCost = self.safe_float(transaction, 13)
if feeCost is not None:
feeCost = -feeCost
addressTo = self.safe_string(transaction, 16)
txid = self.safe_string(transaction, 20)
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': None,
'address': addressTo, # this is actually the tag for XRP transfers (the address is missing)
'addressTo': addressTo,
'tagFrom': None,
'tag': tag, # refix it properly for the tag from description
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': {
'currency': code,
'cost': feeCost,
'rate': None,
},
}
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
self.load_markets()
currency = None
request = {}
method = 'privatePostAuthRMovementsHist'
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method = 'privatePostAuthRMovementsCurrencyHist'
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # max 1000
response = getattr(self, method)(self.extend(request, params))
#
# [
# [
# 13293039, # ID
# 'ETH', # CURRENCY
# 'ETHEREUM', # CURRENCY_NAME
# null,
# null,
# 1574175052000, # MTS_STARTED
# 1574181326000, # MTS_UPDATED
# null,
# null,
# 'CANCELED', # STATUS
# null,
# null,
# -0.24, # AMOUNT, negative for withdrawals
# -0.00135, # FEES
# null,
# null,
# 'DESTINATION_ADDRESS',
# null,
# null,
# null,
# 'TRANSACTION_ID',
# "Purchase of 100 pizzas", # WITHDRAW_TRANSACTION_NOTE
# ]
# ]
#
return self.parse_transactions(response, currency, since, limit)
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
# todo rewrite for https://api-pub.bitfinex.com//v2/conf/pub:map:tx:method
name = self.getCurrencyName(code)
request = {
'method': name,
'wallet': 'exchange', # 'exchange', 'margin', 'funding' and also old labels 'exchange', 'trading', 'deposit', respectively
'amount': self.number_to_string(amount),
'address': address,
}
if tag is not None:
request['payment_id'] = tag
response = self.privatePostAuthWWithdraw(self.extend(request, params))
#
# [
# 1582271520931, # MTS Millisecond Time Stamp of the update
# "acc_wd-req", # TYPE Purpose of notification 'acc_wd-req' account withdrawal request
# null, # MESSAGE_ID unique ID of the message
# null, # not documented
# [
# 0, # WITHDRAWAL_ID Unique Withdrawal ID
# null, # PLACEHOLDER
# "bitcoin", # METHOD Method of withdrawal
# null, # PAYMENT_ID Payment ID if relevant
# "exchange", # WALLET Sending wallet
# 1, # AMOUNT Amount of Withdrawal less fee
# null, # PLACEHOLDER
# null, # PLACEHOLDER
# 0.0004, # WITHDRAWAL_FEE Fee on withdrawal
# ],
# null, # CODE null or integer Work in progress
# "SUCCESS", # STATUS Status of the notification, it may vary over time SUCCESS, ERROR, FAILURE
# "Invalid bitcoin address(abcdef)", # TEXT Text of the notification
# ]
#
text = self.safe_string(response, 7)
if text != 'success':
self.throw_broadly_matched_exception(self.exceptions['broad'], text, text)
transaction = self.parse_transaction(response, currency)
return self.extend(transaction, {
'address': address,
})
def fetch_positions(self, symbols=None, since=None, limit=None, params={}):
self.load_markets()
response = self.privatePostPositions(params)
#
# [
# [
# "tBTCUSD", # SYMBOL
# "ACTIVE", # STATUS
# 0.0195, # AMOUNT
# 8565.0267019, # BASE_PRICE
# 0, # MARGIN_FUNDING
# 0, # MARGIN_FUNDING_TYPE
# -0.33455568705000516, # PL
# -0.0003117550117425625, # PL_PERC
# 7045.876419249083, # PRICE_LIQ
# 3.0673001895895604, # LEVERAGE
# null, # _PLACEHOLDER
# 142355652, # POSITION_ID
# 1574002216000, # MTS_CREATE
# 1574002216000, # MTS_UPDATE
# null, # _PLACEHOLDER
# 0, # TYPE
# null, # _PLACEHOLDER
# 0, # COLLATERAL
# 0, # COLLATERAL_MIN
# # META
# {
# "reason":"TRADE",
# "order_id":34271018124,
# "liq_stage":null,
# "trade_price":"8565.0267019",
# "trade_amount":"0.0195",
# "order_id_oppo":34277498022
# }
# ]
# ]
#
# todo unify parsePosition/parsePositions
return response
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'v1':
request = api + request
else:
request = self.version + request
url = self.urls['api'][api] + '/' + request
if api == 'public':
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
nonce = str(self.nonce())
body = self.json(query)
auth = '/api/' + request + nonce + body
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha384)
headers = {
'bfx-nonce': nonce,
'bfx-apikey': self.apiKey,
'bfx-signature': signature,
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
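# Illustrative sketch (not part of the original class): the private v2 signing
# scheme that sign() above implements, rewritten with the standard library so it
# can be checked in isolation.  The path, nonce and credentials below are
# placeholder assumptions, not real values.
#
#     import hashlib
#     import hmac
#     import json
#     import time
#
#     secret = b'API_SECRET'                       # assumed ASCII secret
#     request = 'v2/auth/r/orders'                  # path as built by sign()
#     nonce = str(int(time.time() * 1000))
#     body = json.dumps({})                         # JSON-encoded query params
#     auth = '/api/' + request + nonce + body
#     signature = hmac.new(secret, auth.encode(), hashlib.sha384).hexdigest()
#     headers = {
#         'bfx-nonce': nonce,
#         'bfx-apikey': 'API_KEY',
#         'bfx-signature': signature,
#         'Content-Type': 'application/json',
#     }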
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if response:
if 'message' in response:
if response['message'].find('not enough exchange balance') >= 0:
raise InsufficientFunds(self.id + ' ' + self.json(response))
raise ExchangeError(self.id + ' ' + self.json(response))
return response
elif response == '':
raise ExchangeError(self.id + ' returned empty response')
return response
def handle_errors(self, statusCode, statusText, url, method, responseHeaders, responseBody, response, requestHeaders, requestBody):
if statusCode == 500:
# See https://docs.bitfinex.com/docs/abbreviations-glossary#section-errorinfo-codes
errorCode = self.number_to_string(response[1])
errorText = response[2]
feedback = self.id + ' ' + errorText
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorText, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], errorText, feedback)
raise ExchangeError(self.id + ' ' + errorText + ' (#' + errorCode + ')')
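# Hedged usage sketch (not part of the original file): how the public and
# private endpoints defined above are typically exercised through ccxt,
# assuming this class is the one exposed as ccxt.bitfinex2.  Symbols, amounts
# and credentials are illustrative placeholders.
#
#     import ccxt
#
#     exchange = ccxt.bitfinex2({'apiKey': 'API_KEY', 'secret': 'API_SECRET'})
#     trades = exchange.fetch_trades('BTC/USD', limit=10)                       # public
#     candles = exchange.fetch_ohlcv('BTC/USD', '1m', limit=100)                # public
#     order = exchange.create_order('BTC/USD', 'limit', 'buy', 0.001, 10000)    # private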
| true
| true
|
1c46380a9d4866b842ff9a1c2591e0d1ba1f4588
| 11,345
|
py
|
Python
|
nuitka/tree/ReformulationDictionaryCreation.py
|
em3ndez/Nuitka
|
a5a036a94c1842d1cd72f27c0c67461798fdf977
|
[
"Apache-2.0"
] | 1
|
2019-09-09T19:27:43.000Z
|
2019-09-09T19:27:43.000Z
|
nuitka/tree/ReformulationDictionaryCreation.py
|
em3ndez/Nuitka
|
a5a036a94c1842d1cd72f27c0c67461798fdf977
|
[
"Apache-2.0"
] | 1
|
2019-02-21T13:05:17.000Z
|
2019-02-21T13:05:17.000Z
|
nuitka/tree/ReformulationDictionaryCreation.py
|
em3ndez/Nuitka
|
a5a036a94c1842d1cd72f27c0c67461798fdf977
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Reformulation of dictionary creations.
Dictionary creations might be directly translated to constants, or they might
become nodes that build dictionaries.
For Python 3.5 and later, unpacking can happen while creating dictionaries;
these get re-formulated into a call to an internal helper function.
Consult the developer manual for information. TODO: Add ability to sync
source code comments with developer manual sections.
"""
from nuitka.nodes.AssignNodes import (
StatementAssignmentVariable,
StatementDelVariable,
StatementReleaseVariable,
)
from nuitka.nodes.AttributeNodes import ExpressionAttributeLookup
from nuitka.nodes.BuiltinIteratorNodes import ExpressionBuiltinIter1
from nuitka.nodes.BuiltinNextNodes import ExpressionBuiltinNext1
from nuitka.nodes.ConstantRefNodes import makeConstantRefNode
from nuitka.nodes.ContainerMakingNodes import makeExpressionMakeTuple
from nuitka.nodes.DictionaryNodes import (
ExpressionKeyValuePair,
StatementDictOperationUpdate,
makeExpressionMakeDict,
makeExpressionMakeDictOrConstant,
makeExpressionPairs,
)
from nuitka.nodes.ExceptionNodes import (
ExpressionBuiltinMakeException,
StatementRaiseException,
)
from nuitka.nodes.FunctionNodes import (
ExpressionFunctionCall,
ExpressionFunctionCreation,
ExpressionFunctionRef,
)
from nuitka.nodes.LoopNodes import StatementLoop, StatementLoopBreak
from nuitka.nodes.OperatorNodes import makeBinaryOperationNode
from nuitka.nodes.ReturnNodes import StatementReturn
from nuitka.nodes.TypeNodes import ExpressionBuiltinType1
from nuitka.nodes.VariableRefNodes import (
ExpressionTempVariableRef,
ExpressionVariableRef,
)
from nuitka.PythonVersions import python_version
from nuitka.specs.ParameterSpecs import ParameterSpec
from .InternalModule import (
internal_source_ref,
makeInternalHelperFunctionBody,
once_decorator,
)
from .ReformulationTryExceptStatements import makeTryExceptSingleHandlerNode
from .ReformulationTryFinallyStatements import makeTryFinallyStatement
from .TreeHelpers import (
buildNode,
buildNodeList,
makeStatementsSequenceFromStatement,
makeStatementsSequenceFromStatements,
)
def buildDictionaryNode(provider, node, source_ref):
if python_version >= 350:
for key in node.keys:
if key is None:
return buildDictionaryUnpacking(
provider=provider, node=node, source_ref=source_ref
)
return makeExpressionMakeDictOrConstant(
pairs=makeExpressionPairs(
keys=buildNodeList(provider, node.keys, source_ref),
values=buildNodeList(provider, node.values, source_ref),
),
user_provided=True,
source_ref=source_ref,
)
@once_decorator
def getDictUnpackingHelper():
helper_name = "_unpack_dict"
result = makeInternalHelperFunctionBody(
name=helper_name,
parameters=ParameterSpec(
ps_name=helper_name,
ps_normal_args=(),
ps_list_star_arg="args",
ps_dict_star_arg=None,
ps_default_count=0,
ps_kw_only_args=(),
ps_pos_only_args=(),
),
)
temp_scope = None
tmp_result_variable = result.allocateTempVariable(temp_scope, "dict")
tmp_iter_variable = result.allocateTempVariable(temp_scope, "iter")
tmp_item_variable = result.allocateTempVariable(temp_scope, "keys")
loop_body = makeStatementsSequenceFromStatements(
makeTryExceptSingleHandlerNode(
tried=StatementAssignmentVariable(
variable=tmp_item_variable,
source=ExpressionBuiltinNext1(
value=ExpressionTempVariableRef(
variable=tmp_iter_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
exception_name="StopIteration",
handler_body=StatementLoopBreak(source_ref=internal_source_ref),
source_ref=internal_source_ref,
),
makeTryExceptSingleHandlerNode(
tried=StatementDictOperationUpdate(
dict_arg=ExpressionTempVariableRef(
variable=tmp_result_variable, source_ref=internal_source_ref
),
value=ExpressionTempVariableRef(
variable=tmp_item_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
exception_name="AttributeError",
handler_body=StatementRaiseException(
exception_type=ExpressionBuiltinMakeException(
exception_name="TypeError",
args=(
makeBinaryOperationNode(
operator="Mod",
left=makeConstantRefNode(
constant="""\
'%s' object is not a mapping""",
source_ref=internal_source_ref,
user_provided=True,
),
right=makeExpressionMakeTuple(
elements=(
ExpressionAttributeLookup(
expression=ExpressionBuiltinType1(
value=ExpressionTempVariableRef(
variable=tmp_item_variable,
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
attribute_name="__name__",
source_ref=internal_source_ref,
),
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
),
source_ref=internal_source_ref,
),
exception_value=None,
exception_trace=None,
exception_cause=None,
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
)
args_variable = result.getVariableForAssignment(variable_name="args")
final = (
StatementReleaseVariable(
variable=tmp_result_variable, source_ref=internal_source_ref
),
StatementReleaseVariable(
variable=tmp_iter_variable, source_ref=internal_source_ref
),
StatementReleaseVariable(
variable=tmp_item_variable, source_ref=internal_source_ref
),
# We get handed our args responsibility.
StatementDelVariable(
variable=args_variable, tolerant=False, source_ref=internal_source_ref
),
)
tried = makeStatementsSequenceFromStatements(
StatementAssignmentVariable(
variable=tmp_iter_variable,
source=ExpressionBuiltinIter1(
value=ExpressionVariableRef(
variable=args_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
StatementAssignmentVariable(
variable=tmp_result_variable,
source=makeConstantRefNode(constant={}, source_ref=internal_source_ref),
source_ref=internal_source_ref,
),
StatementLoop(body=loop_body, source_ref=internal_source_ref),
StatementReturn(
expression=ExpressionTempVariableRef(
variable=tmp_result_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
)
result.setBody(
makeStatementsSequenceFromStatement(
makeTryFinallyStatement(
provider=result,
tried=tried,
final=final,
source_ref=internal_source_ref,
)
)
)
return result
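# Rough plain-Python equivalent (illustrative only, not generated code) of the
# helper assembled above; the name and error message mirror the nodes built in
# getDictUnpackingHelper():
#
#     def _unpack_dict(*args):
#         result = {}
#         for item in args:
#             try:
#                 result.update(item)
#             except AttributeError:
#                 raise TypeError(
#                     "'%s' object is not a mapping" % type(item).__name__
#                 )
#         return result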
def buildDictionaryUnpackingArgs(provider, keys, values, source_ref):
result = []
for key, value in zip(keys, values):
# TODO: We could be a lot cleverer about the dictionaries for non-starred
# arguments, but let's get this to work first.
if key is None:
result.append(buildNode(provider, value, source_ref))
elif type(key) is str:
result.append(
makeExpressionMakeDict(
pairs=(
ExpressionKeyValuePair(
key=makeConstantRefNode(
constant=key, source_ref=source_ref
),
value=buildNode(provider, value, source_ref),
source_ref=source_ref,
),
),
source_ref=source_ref,
)
)
else:
result.append(
makeExpressionMakeDict(
pairs=(
ExpressionKeyValuePair(
key=buildNode(provider, key, source_ref),
value=buildNode(provider, value, source_ref),
source_ref=source_ref,
),
),
source_ref=source_ref,
)
)
return result
def buildDictionaryUnpacking(provider, node, source_ref):
helper_args = buildDictionaryUnpackingArgs(
provider, node.keys, node.values, source_ref
)
result = ExpressionFunctionCall(
function=ExpressionFunctionCreation(
function_ref=ExpressionFunctionRef(
function_body=getDictUnpackingHelper(), source_ref=source_ref
),
defaults=(),
kw_defaults=None,
annotations=None,
source_ref=source_ref,
),
values=(makeExpressionMakeTuple(helper_args, source_ref),),
source_ref=source_ref,
)
result.setCompatibleSourceReference(helper_args[-1].getCompatibleSourceReference())
return result
| 36.362179
| 87
| 0.603967
|
from nuitka.nodes.AssignNodes import (
StatementAssignmentVariable,
StatementDelVariable,
StatementReleaseVariable,
)
from nuitka.nodes.AttributeNodes import ExpressionAttributeLookup
from nuitka.nodes.BuiltinIteratorNodes import ExpressionBuiltinIter1
from nuitka.nodes.BuiltinNextNodes import ExpressionBuiltinNext1
from nuitka.nodes.ConstantRefNodes import makeConstantRefNode
from nuitka.nodes.ContainerMakingNodes import makeExpressionMakeTuple
from nuitka.nodes.DictionaryNodes import (
ExpressionKeyValuePair,
StatementDictOperationUpdate,
makeExpressionMakeDict,
makeExpressionMakeDictOrConstant,
makeExpressionPairs,
)
from nuitka.nodes.ExceptionNodes import (
ExpressionBuiltinMakeException,
StatementRaiseException,
)
from nuitka.nodes.FunctionNodes import (
ExpressionFunctionCall,
ExpressionFunctionCreation,
ExpressionFunctionRef,
)
from nuitka.nodes.LoopNodes import StatementLoop, StatementLoopBreak
from nuitka.nodes.OperatorNodes import makeBinaryOperationNode
from nuitka.nodes.ReturnNodes import StatementReturn
from nuitka.nodes.TypeNodes import ExpressionBuiltinType1
from nuitka.nodes.VariableRefNodes import (
ExpressionTempVariableRef,
ExpressionVariableRef,
)
from nuitka.PythonVersions import python_version
from nuitka.specs.ParameterSpecs import ParameterSpec
from .InternalModule import (
internal_source_ref,
makeInternalHelperFunctionBody,
once_decorator,
)
from .ReformulationTryExceptStatements import makeTryExceptSingleHandlerNode
from .ReformulationTryFinallyStatements import makeTryFinallyStatement
from .TreeHelpers import (
buildNode,
buildNodeList,
makeStatementsSequenceFromStatement,
makeStatementsSequenceFromStatements,
)
def buildDictionaryNode(provider, node, source_ref):
if python_version >= 350:
for key in node.keys:
if key is None:
return buildDictionaryUnpacking(
provider=provider, node=node, source_ref=source_ref
)
return makeExpressionMakeDictOrConstant(
pairs=makeExpressionPairs(
keys=buildNodeList(provider, node.keys, source_ref),
values=buildNodeList(provider, node.values, source_ref),
),
user_provided=True,
source_ref=source_ref,
)
@once_decorator
def getDictUnpackingHelper():
helper_name = "_unpack_dict"
result = makeInternalHelperFunctionBody(
name=helper_name,
parameters=ParameterSpec(
ps_name=helper_name,
ps_normal_args=(),
ps_list_star_arg="args",
ps_dict_star_arg=None,
ps_default_count=0,
ps_kw_only_args=(),
ps_pos_only_args=(),
),
)
temp_scope = None
tmp_result_variable = result.allocateTempVariable(temp_scope, "dict")
tmp_iter_variable = result.allocateTempVariable(temp_scope, "iter")
tmp_item_variable = result.allocateTempVariable(temp_scope, "keys")
loop_body = makeStatementsSequenceFromStatements(
makeTryExceptSingleHandlerNode(
tried=StatementAssignmentVariable(
variable=tmp_item_variable,
source=ExpressionBuiltinNext1(
value=ExpressionTempVariableRef(
variable=tmp_iter_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
exception_name="StopIteration",
handler_body=StatementLoopBreak(source_ref=internal_source_ref),
source_ref=internal_source_ref,
),
makeTryExceptSingleHandlerNode(
tried=StatementDictOperationUpdate(
dict_arg=ExpressionTempVariableRef(
variable=tmp_result_variable, source_ref=internal_source_ref
),
value=ExpressionTempVariableRef(
variable=tmp_item_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
exception_name="AttributeError",
handler_body=StatementRaiseException(
exception_type=ExpressionBuiltinMakeException(
exception_name="TypeError",
args=(
makeBinaryOperationNode(
operator="Mod",
left=makeConstantRefNode(
constant="""\
'%s' object is not a mapping""",
source_ref=internal_source_ref,
user_provided=True,
),
right=makeExpressionMakeTuple(
elements=(
ExpressionAttributeLookup(
expression=ExpressionBuiltinType1(
value=ExpressionTempVariableRef(
variable=tmp_item_variable,
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
attribute_name="__name__",
source_ref=internal_source_ref,
),
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
),
source_ref=internal_source_ref,
),
exception_value=None,
exception_trace=None,
exception_cause=None,
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
)
args_variable = result.getVariableForAssignment(variable_name="args")
final = (
StatementReleaseVariable(
variable=tmp_result_variable, source_ref=internal_source_ref
),
StatementReleaseVariable(
variable=tmp_iter_variable, source_ref=internal_source_ref
),
StatementReleaseVariable(
variable=tmp_item_variable, source_ref=internal_source_ref
),
StatementDelVariable(
variable=args_variable, tolerant=False, source_ref=internal_source_ref
),
)
tried = makeStatementsSequenceFromStatements(
StatementAssignmentVariable(
variable=tmp_iter_variable,
source=ExpressionBuiltinIter1(
value=ExpressionVariableRef(
variable=args_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
StatementAssignmentVariable(
variable=tmp_result_variable,
source=makeConstantRefNode(constant={}, source_ref=internal_source_ref),
source_ref=internal_source_ref,
),
StatementLoop(body=loop_body, source_ref=internal_source_ref),
StatementReturn(
expression=ExpressionTempVariableRef(
variable=tmp_result_variable, source_ref=internal_source_ref
),
source_ref=internal_source_ref,
),
)
result.setBody(
makeStatementsSequenceFromStatement(
makeTryFinallyStatement(
provider=result,
tried=tried,
final=final,
source_ref=internal_source_ref,
)
)
)
return result
def buildDictionaryUnpackingArgs(provider, keys, values, source_ref):
result = []
for key, value in zip(keys, values):
if key is None:
result.append(buildNode(provider, value, source_ref))
elif type(key) is str:
result.append(
makeExpressionMakeDict(
pairs=(
ExpressionKeyValuePair(
key=makeConstantRefNode(
constant=key, source_ref=source_ref
),
value=buildNode(provider, value, source_ref),
source_ref=source_ref,
),
),
source_ref=source_ref,
)
)
else:
result.append(
makeExpressionMakeDict(
pairs=(
ExpressionKeyValuePair(
key=buildNode(provider, key, source_ref),
value=buildNode(provider, value, source_ref),
source_ref=source_ref,
),
),
source_ref=source_ref,
)
)
return result
def buildDictionaryUnpacking(provider, node, source_ref):
helper_args = buildDictionaryUnpackingArgs(
provider, node.keys, node.values, source_ref
)
result = ExpressionFunctionCall(
function=ExpressionFunctionCreation(
function_ref=ExpressionFunctionRef(
function_body=getDictUnpackingHelper(), source_ref=source_ref
),
defaults=(),
kw_defaults=None,
annotations=None,
source_ref=source_ref,
),
values=(makeExpressionMakeTuple(helper_args, source_ref),),
source_ref=source_ref,
)
result.setCompatibleSourceReference(helper_args[-1].getCompatibleSourceReference())
return result
| true
| true
|
1c46385a5596d5e0bd98b187dbff517c3d1d3c1c
| 20,549
|
py
|
Python
|
flux_combined_high_binding/model_741.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_combined_high_binding/model_741.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
flux_combined_high_binding/model_741.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 87500.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 60000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
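# Hedged usage sketch (not part of the exported model): one way to simulate this
# model with PySB's SciPy-based integrator.  The time span and the observable
# chosen for printing are illustrative assumptions, not values from the export.
if __name__ == '__main__':
    import numpy as np
    from pysb.simulator import ScipyOdeSimulator
    # integrate the ODEs derived from the rules above over an assumed time span
    tspan = np.linspace(0, 20000, 101)
    simulation_result = ScipyOdeSimulator(model, tspan=tspan).run()
    # 'ParpC_obs' is one of the observables declared in this model
    print(simulation_result.observables['ParpC_obs'][-1])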
| 95.134259
| 798
| 0.804127
|
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 87500.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 60000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
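# A hedged usage sketch, not part of the exported model itself: it assumes the
# file declares Model() at the top (as PySB export scripts do) and that pysb,
# numpy and scipy are installed; the time span and observable are illustrative.
if __name__ == '__main__':
    import numpy as np
    from pysb.simulator import ScipyOdeSimulator

    tspan = np.linspace(0, 20000, 101)  # arbitrary simulation horizon
    result = ScipyOdeSimulator(model, tspan).run()
    print(result.observables['ParpC_obs'][-1])  # cleaved PARP at the final time point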
| true
| true
|
1c463a5d7cdbdef19a2f8ee060198069e58e05e9
| 549
|
py
|
Python
|
core/utils.py
|
matiaspacheco/cms_wehaa
|
999f49344c453afd1cf8f11f36ac6b56b2b7f130
|
[
"MIT"
] | null | null | null |
core/utils.py
|
matiaspacheco/cms_wehaa
|
999f49344c453afd1cf8f11f36ac6b56b2b7f130
|
[
"MIT"
] | null | null | null |
core/utils.py
|
matiaspacheco/cms_wehaa
|
999f49344c453afd1cf8f11f36ac6b56b2b7f130
|
[
"MIT"
] | null | null | null |
# Standard Python library imports.
import math
import re
# Core Django imports.
from django.utils.html import strip_tags
def count_words(html_string):
# html_string = """
# <h1>This is a title</h1>
# """
word_string = strip_tags(html_string)
matching_words = re.findall(r'\w+', word_string)
count = len(matching_words) #joincfe.com/projects/
return count
def read_time(html_string):
count = count_words(html_string)
read_time_min = math.ceil(count/200.0) #assuming 200wpm reading
return int(read_time_min)
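# A small usage sketch (assumes Django is installed, since strip_tags comes
# from django.utils.html; the sample HTML below is made up):
if __name__ == "__main__":
    sample = "<p>" + "word " * 450 + "</p>"
    print(count_words(sample))  # 450
    print(read_time(sample))    # ceil(450 / 200.0) == 3 minutes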
| 26.142857
| 67
| 0.714026
|
import math
import re
from django.utils.html import strip_tags
def count_words(html_string):
# <h1>This is a title</h1>
# """
word_string = strip_tags(html_string)
matching_words = re.findall(r'\w+', word_string)
count = len(matching_words)
return count
def read_time(html_string):
count = count_words(html_string)
read_time_min = math.ceil(count/200.0)
return int(read_time_min)
| true
| true
|
1c463a5ecbad7e73fb57009519d7ca474d07af2c
| 2,566
|
py
|
Python
|
web-api/favorites/views.py
|
Egor4ik325/anyberry
|
87787f82f1cec0f32d9d7c7384e7b2771f34af3c
|
[
"MIT"
] | 1
|
2021-09-12T16:28:52.000Z
|
2021-09-12T16:28:52.000Z
|
web-api/favorites/views.py
|
Egor4ik325/anyberry
|
87787f82f1cec0f32d9d7c7384e7b2771f34af3c
|
[
"MIT"
] | 2
|
2021-09-06T08:31:56.000Z
|
2021-09-06T08:35:25.000Z
|
web-api/favorites/views.py
|
Egor4ik325/anyberry
|
87787f82f1cec0f32d9d7c7384e7b2771f34af3c
|
[
"MIT"
] | null | null | null |
from berries.models import Berry
from berries.serializers import BerrySerializer
from django.core.cache import cache
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from favorites import serializers
from favorites.serializers import FavoriteDeserializer
class FavoriteViewSet(ViewSet):
"""
    API view for listing, adding, removing and clearing berries in the
    favorite list. The list of favorite berries is stored in the Redis
    datastore and is keyed by the requesting user.
"""
cache_format = "favorite_{user}"
authentication_classes = [SessionAuthentication]
permission_classes = [IsAuthenticated]
def list(self, *args, **kwargs):
"""List berries in the favorite list.
        The list consists only of unique berries.
"""
# Get favorite berries from the Redis (cache)
berries = cache.get(self.get_cache_key(), default=set())
data = list(berries)
return Response(data)
def clear(self, *args, **kwargs):
"""Clear berries in the favorite list."""
cache.delete(self.get_cache_key())
return Response(status=status.HTTP_204_NO_CONTENT)
def add(self, *args, **kwargs):
"""Add berry to the favorite list.
        This has no effect if the berry is already present.
"""
serializer = FavoriteDeserializer(data=self.request.data)
serializer.is_valid(raise_exception=True)
berry = serializer.save()
berries: set = cache.get(self.get_cache_key(), default=set())
berries.add(berry.id)
cache.set(self.get_cache_key(), berries, timeout=None)
return Response(status=status.HTTP_201_CREATED)
def remove(self, *args, **kwargs):
"""Remove berry from the favorite list."""
berry_id = self.kwargs["berry_id"]
berry = Berry.berries.get(id=berry_id)
berries: set = cache.get(self.get_cache_key(), default=set())
try:
berries.remove(berry.id)
except KeyError:
pass
cache.set(self.get_cache_key(), berries, timeout=None)
return Response(status=status.HTTP_204_NO_CONTENT)
def get_cache_key(self):
return self.cache_format.format(user=self.request.user.id)
favorite_list_view = FavoriteViewSet.as_view(
{"get": "list", "post": "add", "delete": "clear"})
favorite_detail_view = FavoriteViewSet.as_view(
{"delete": "remove"})
| 32.897436
| 69
| 0.687062
|
from berries.models import Berry
from berries.serializers import BerrySerializer
from django.core.cache import cache
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from favorites import serializers
from favorites.serializers import FavoriteDeserializer
class FavoriteViewSet(ViewSet):
cache_format = "favorite_{user}"
authentication_classes = [SessionAuthentication]
permission_classes = [IsAuthenticated]
def list(self, *args, **kwargs):
berries = cache.get(self.get_cache_key(), default=set())
data = list(berries)
return Response(data)
def clear(self, *args, **kwargs):
cache.delete(self.get_cache_key())
return Response(status=status.HTTP_204_NO_CONTENT)
def add(self, *args, **kwargs):
serializer = FavoriteDeserializer(data=self.request.data)
serializer.is_valid(raise_exception=True)
berry = serializer.save()
berries: set = cache.get(self.get_cache_key(), default=set())
berries.add(berry.id)
cache.set(self.get_cache_key(), berries, timeout=None)
return Response(status=status.HTTP_201_CREATED)
def remove(self, *args, **kwargs):
berry_id = self.kwargs["berry_id"]
berry = Berry.berries.get(id=berry_id)
berries: set = cache.get(self.get_cache_key(), default=set())
try:
berries.remove(berry.id)
except KeyError:
pass
cache.set(self.get_cache_key(), berries, timeout=None)
return Response(status=status.HTTP_204_NO_CONTENT)
def get_cache_key(self):
return self.cache_format.format(user=self.request.user.id)
favorite_list_view = FavoriteViewSet.as_view(
{"get": "list", "post": "add", "delete": "clear"})
favorite_detail_view = FavoriteViewSet.as_view(
{"delete": "remove"})
| true
| true
|
1c463a9792c032fff33e3627f78835a1ae9c2a50
| 4,436
|
py
|
Python
|
elastalert/kibana_external_url_formatter.py
|
buratinopy/elastalert2
|
27deb8a61dd48798c4686ec95d3e48909903a694
|
[
"Apache-2.0"
] | null | null | null |
elastalert/kibana_external_url_formatter.py
|
buratinopy/elastalert2
|
27deb8a61dd48798c4686ec95d3e48909903a694
|
[
"Apache-2.0"
] | null | null | null |
elastalert/kibana_external_url_formatter.py
|
buratinopy/elastalert2
|
27deb8a61dd48798c4686ec95d3e48909903a694
|
[
"Apache-2.0"
] | null | null | null |
import boto3
import os
from urllib.parse import parse_qsl, urlencode, urljoin, urlparse, urlsplit, urlunsplit
import requests
from requests import RequestException
from requests.auth import AuthBase, HTTPBasicAuth
from elastalert.auth import RefeshableAWSRequestsAuth
from elastalert.util import EAException
def append_security_tenant(url, security_tenant):
'''Appends the security_tenant query string parameter to the url'''
parsed = urlsplit(url)
if parsed.query:
qs = parse_qsl(parsed.query, keep_blank_values=True, strict_parsing=True)
else:
qs = []
qs.append(('security_tenant', security_tenant))
new_query = urlencode(qs)
new_args = parsed._replace(query=new_query)
new_url = urlunsplit(new_args)
return new_url
class KibanaExternalUrlFormatter:
'''Interface for formatting external Kibana urls'''
def format(self, relative_url: str) -> str:
raise NotImplementedError()
class AbsoluteKibanaExternalUrlFormatter(KibanaExternalUrlFormatter):
'''Formats absolute external Kibana urls'''
def __init__(self, base_url: str, security_tenant: str) -> None:
self.base_url = base_url
self.security_tenant = security_tenant
def format(self, relative_url: str) -> str:
url = urljoin(self.base_url, relative_url)
if self.security_tenant:
url = append_security_tenant(url, self.security_tenant)
return url
class ShortKibanaExternalUrlFormatter(KibanaExternalUrlFormatter):
'''Formats external urls using the Kibana Shorten URL API'''
def __init__(self, base_url: str, auth: AuthBase, security_tenant: str) -> None:
self.auth = auth
self.security_tenant = security_tenant
self.goto_url = urljoin(base_url, 'goto/')
shorten_url = urljoin(base_url, 'api/shorten_url')
if security_tenant:
shorten_url = append_security_tenant(shorten_url, security_tenant)
self.shorten_url = shorten_url
def format(self, relative_url: str) -> str:
        # join with '/' to ensure the url is relative to the root of the app
long_url = urljoin('/', relative_url)
if self.security_tenant:
long_url = append_security_tenant(long_url, self.security_tenant)
try:
response = requests.post(
url=self.shorten_url,
auth=self.auth,
headers={
'kbn-xsrf': 'elastalert',
'osd-xsrf': 'elastalert'
},
json={
'url': long_url
}
)
response.raise_for_status()
except RequestException as e:
raise EAException("Failed to invoke Kibana Shorten URL API: %s" % e)
response_body = response.json()
url_id = response_body.get('urlId')
goto_url = urljoin(self.goto_url, url_id)
if self.security_tenant:
goto_url = append_security_tenant(goto_url, self.security_tenant)
return goto_url
def create_kibana_auth(kibana_url, rule) -> AuthBase:
'''Creates a Kibana http authentication for use by requests'''
# Basic
username = rule.get('kibana_username')
password = rule.get('kibana_password')
if username and password:
return HTTPBasicAuth(username, password)
# AWS SigV4
aws_region = rule.get('aws_region')
if not aws_region:
aws_region = os.environ.get('AWS_DEFAULT_REGION')
if aws_region:
aws_profile = rule.get('profile')
session = boto3.session.Session(
profile_name=aws_profile,
region_name=aws_region
)
credentials = session.get_credentials()
kibana_host = urlparse(kibana_url).hostname
return RefeshableAWSRequestsAuth(
refreshable_credential=credentials,
aws_host=kibana_host,
aws_region=aws_region,
aws_service='es'
)
# Unauthenticated
return None
def create_kibana_external_url_formatter(
rule,
shorten: bool,
security_tenant: str
) -> KibanaExternalUrlFormatter:
'''Creates a Kibana external url formatter'''
base_url = rule.get('kibana_url')
if shorten:
auth = create_kibana_auth(base_url, rule)
return ShortKibanaExternalUrlFormatter(base_url, auth, security_tenant)
return AbsoluteKibanaExternalUrlFormatter(base_url, security_tenant)
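# A hedged usage sketch; the rule keys are the ones this module reads, but the
# URL, credentials and tenant values are invented for illustration.
if __name__ == '__main__':
    example_rule = {
        'kibana_url': 'https://kibana.example.org/',
        'kibana_username': 'elastalert',
        'kibana_password': 'secret',
    }
    formatter = create_kibana_external_url_formatter(
        example_rule, shorten=False, security_tenant='global'
    )
    # Prints https://kibana.example.org/app/discover?security_tenant=global
    print(formatter.format('app/discover'))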
| 31.913669
| 86
| 0.665014
|
import boto3
import os
from urllib.parse import parse_qsl, urlencode, urljoin, urlparse, urlsplit, urlunsplit
import requests
from requests import RequestException
from requests.auth import AuthBase, HTTPBasicAuth
from elastalert.auth import RefeshableAWSRequestsAuth
from elastalert.util import EAException
def append_security_tenant(url, security_tenant):
parsed = urlsplit(url)
if parsed.query:
qs = parse_qsl(parsed.query, keep_blank_values=True, strict_parsing=True)
else:
qs = []
qs.append(('security_tenant', security_tenant))
new_query = urlencode(qs)
new_args = parsed._replace(query=new_query)
new_url = urlunsplit(new_args)
return new_url
class KibanaExternalUrlFormatter:
def format(self, relative_url: str) -> str:
raise NotImplementedError()
class AbsoluteKibanaExternalUrlFormatter(KibanaExternalUrlFormatter):
def __init__(self, base_url: str, security_tenant: str) -> None:
self.base_url = base_url
self.security_tenant = security_tenant
def format(self, relative_url: str) -> str:
url = urljoin(self.base_url, relative_url)
if self.security_tenant:
url = append_security_tenant(url, self.security_tenant)
return url
class ShortKibanaExternalUrlFormatter(KibanaExternalUrlFormatter):
def __init__(self, base_url: str, auth: AuthBase, security_tenant: str) -> None:
self.auth = auth
self.security_tenant = security_tenant
self.goto_url = urljoin(base_url, 'goto/')
shorten_url = urljoin(base_url, 'api/shorten_url')
if security_tenant:
shorten_url = append_security_tenant(shorten_url, security_tenant)
self.shorten_url = shorten_url
def format(self, relative_url: str) -> str:
long_url = urljoin('/', relative_url)
if self.security_tenant:
long_url = append_security_tenant(long_url, self.security_tenant)
try:
response = requests.post(
url=self.shorten_url,
auth=self.auth,
headers={
'kbn-xsrf': 'elastalert',
'osd-xsrf': 'elastalert'
},
json={
'url': long_url
}
)
response.raise_for_status()
except RequestException as e:
raise EAException("Failed to invoke Kibana Shorten URL API: %s" % e)
response_body = response.json()
url_id = response_body.get('urlId')
goto_url = urljoin(self.goto_url, url_id)
if self.security_tenant:
goto_url = append_security_tenant(goto_url, self.security_tenant)
return goto_url
def create_kibana_auth(kibana_url, rule) -> AuthBase:
username = rule.get('kibana_username')
password = rule.get('kibana_password')
if username and password:
return HTTPBasicAuth(username, password)
aws_region = rule.get('aws_region')
if not aws_region:
aws_region = os.environ.get('AWS_DEFAULT_REGION')
if aws_region:
aws_profile = rule.get('profile')
session = boto3.session.Session(
profile_name=aws_profile,
region_name=aws_region
)
credentials = session.get_credentials()
kibana_host = urlparse(kibana_url).hostname
return RefeshableAWSRequestsAuth(
refreshable_credential=credentials,
aws_host=kibana_host,
aws_region=aws_region,
aws_service='es'
)
return None
def create_kibana_external_url_formatter(
rule,
shorten: bool,
security_tenant: str
) -> KibanaExternalUrlFormatter:
base_url = rule.get('kibana_url')
if shorten:
auth = create_kibana_auth(base_url, rule)
return ShortKibanaExternalUrlFormatter(base_url, auth, security_tenant)
return AbsoluteKibanaExternalUrlFormatter(base_url, security_tenant)
| true
| true
|
1c463b00bcc93f690abe0126cebd12479e2b2c5d
| 1,568
|
py
|
Python
|
cirq/optimizers/drop_negligible.py
|
sleichen/Cirq
|
02f715203406d1f2af2d86e7561af09a2cdd4d45
|
[
"Apache-2.0"
] | 1
|
2020-05-20T00:08:33.000Z
|
2020-05-20T00:08:33.000Z
|
cirq/optimizers/drop_negligible.py
|
sleichen/Cirq
|
02f715203406d1f2af2d86e7561af09a2cdd4d45
|
[
"Apache-2.0"
] | null | null | null |
cirq/optimizers/drop_negligible.py
|
sleichen/Cirq
|
02f715203406d1f2af2d86e7561af09a2cdd4d45
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An optimization pass that removes operations with tiny effects."""
from typing import TYPE_CHECKING
from cirq import protocols
from cirq.circuits import optimization_pass, circuit as _circuit
if TYPE_CHECKING:
# pylint: disable=unused-import
from typing import List, Tuple
from cirq import ops
class DropNegligible(optimization_pass.OptimizationPass):
"""An optimization pass that removes operations with tiny effects."""
def __init__(self, tolerance: float = 1e-8) -> None:
self.tolerance = tolerance
def optimize_circuit(self, circuit: _circuit.Circuit) -> None:
deletions = [] # type: List[Tuple[int, ops.Operation]]
for moment_index, moment in enumerate(circuit):
for op in moment.operations:
if (op is not None and
protocols.trace_distance_bound(op) <= self.tolerance):
deletions.append((moment_index, op))
circuit.batch_remove(deletions)
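# A minimal usage sketch (assumes cirq is installed; the gates and tolerance
# below are illustrative):
if __name__ == '__main__':
    import cirq

    q = cirq.LineQubit(0)
    circ = cirq.Circuit([cirq.Z(q) ** 1e-10, cirq.X(q)])
    DropNegligible(tolerance=1e-8).optimize_circuit(circ)
    print(circ)  # the near-identity Z rotation is dropped, the X gate remains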
| 37.333333
| 78
| 0.714286
|
from typing import TYPE_CHECKING
from cirq import protocols
from cirq.circuits import optimization_pass, circuit as _circuit
if TYPE_CHECKING:
from typing import List, Tuple
from cirq import ops
class DropNegligible(optimization_pass.OptimizationPass):
def __init__(self, tolerance: float = 1e-8) -> None:
self.tolerance = tolerance
def optimize_circuit(self, circuit: _circuit.Circuit) -> None:
deletions = []
for moment_index, moment in enumerate(circuit):
for op in moment.operations:
if (op is not None and
protocols.trace_distance_bound(op) <= self.tolerance):
deletions.append((moment_index, op))
circuit.batch_remove(deletions)
| true
| true
|
1c463cf1cadf9635379497394d42b7e870640036
| 5,782
|
py
|
Python
|
egs/wenetspeech/ASR/local/text2token.py
|
zhu-han/icefall
|
9f6c748b3098e3e32c704c27c40ec31f2e9d376c
|
[
"Apache-2.0"
] | null | null | null |
egs/wenetspeech/ASR/local/text2token.py
|
zhu-han/icefall
|
9f6c748b3098e3e32c704c27c40ec31f2e9d376c
|
[
"Apache-2.0"
] | null | null | null |
egs/wenetspeech/ASR/local/text2token.py
|
zhu-han/icefall
|
9f6c748b3098e3e32c704c27c40ec31f2e9d376c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2017 Johns Hopkins University (authors: Shinji Watanabe)
# 2022 Xiaomi Corp. (authors: Mingshuang Luo)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs
import re
import sys
from typing import List
from pypinyin import lazy_pinyin, pinyin
is_python2 = sys.version_info[0] == 2
def exist_or_not(i, match_pos):
start_pos = None
end_pos = None
for pos in match_pos:
if pos[0] <= i < pos[1]:
start_pos = pos[0]
end_pos = pos[1]
break
return start_pos, end_pos
def get_parser():
parser = argparse.ArgumentParser(
description="convert raw text to tokenized text",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--nchar",
"-n",
default=1,
type=int,
help="number of characters to split, i.e., \
aabb -> a a b b with -n 1 and aa bb with -n 2",
)
parser.add_argument(
"--skip-ncols", "-s", default=0, type=int, help="skip first n columns"
)
parser.add_argument(
"--space", default="<space>", type=str, help="space symbol"
)
parser.add_argument(
"--non-lang-syms",
"-l",
default=None,
type=str,
help="list of non-linguistic symobles, e.g., <NOISE> etc.",
)
parser.add_argument(
"text", type=str, default=False, nargs="?", help="input text"
)
parser.add_argument(
"--trans_type",
"-t",
type=str,
default="char",
choices=["char", "pinyin", "lazy_pinyin"],
help="""Transcript type. char/pinyin/lazy_pinyin""",
)
return parser
def token2id(
texts, token_table, token_type: str = "lazy_pinyin", oov: str = "<unk>"
) -> List[List[int]]:
"""Convert token to id.
Args:
texts:
The input texts, it refers to the chinese text here.
token_table:
The token table is built based on "data/lang_xxx/token.txt"
token_type:
The type of token, such as "pinyin" and "lazy_pinyin".
oov:
Out of vocabulary token. When a word(token) in the transcript
does not exist in the token list, it is replaced with `oov`.
Returns:
The list of ids for the input texts.
"""
if texts is None:
raise ValueError("texts can't be None!")
else:
oov_id = token_table[oov]
ids: List[List[int]] = []
for text in texts:
chars_list = list(str(text))
if token_type == "lazy_pinyin":
text = lazy_pinyin(chars_list)
sub_ids = [
token_table[txt] if txt in token_table else oov_id
for txt in text
]
ids.append(sub_ids)
else: # token_type = "pinyin"
text = pinyin(chars_list)
sub_ids = [
token_table[txt[0]] if txt[0] in token_table else oov_id
for txt in text
]
ids.append(sub_ids)
return ids
def main():
parser = get_parser()
args = parser.parse_args()
rs = []
if args.non_lang_syms is not None:
with codecs.open(args.non_lang_syms, "r", encoding="utf-8") as f:
nls = [x.rstrip() for x in f.readlines()]
rs = [re.compile(re.escape(x)) for x in nls]
if args.text:
f = codecs.open(args.text, encoding="utf-8")
else:
f = codecs.getreader("utf-8")(
sys.stdin if is_python2 else sys.stdin.buffer
)
sys.stdout = codecs.getwriter("utf-8")(
sys.stdout if is_python2 else sys.stdout.buffer
)
line = f.readline()
n = args.nchar
while line:
x = line.split()
print(" ".join(x[: args.skip_ncols]), end=" ")
a = " ".join(x[args.skip_ncols :]) # noqa E203
# get all matched positions
match_pos = []
for r in rs:
i = 0
while i >= 0:
m = r.search(a, i)
if m:
match_pos.append([m.start(), m.end()])
i = m.end()
else:
break
if len(match_pos) > 0:
chars = []
i = 0
while i < len(a):
start_pos, end_pos = exist_or_not(i, match_pos)
if start_pos is not None:
chars.append(a[start_pos:end_pos])
i = end_pos
else:
chars.append(a[i])
i += 1
a = chars
if args.trans_type == "pinyin":
a = pinyin(list(str(a)))
a = [one[0] for one in a]
if args.trans_type == "lazy_pinyin":
a = lazy_pinyin(list(str(a)))
a = [a[j : j + n] for j in range(0, len(a), n)] # noqa E203
a_flat = []
for z in a:
a_flat.append("".join(z))
a_chars = [z.replace(" ", args.space) for z in a_flat]
print("".join(a_chars))
line = f.readline()
if __name__ == "__main__":
main()
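# A toy sketch of token2id with an invented token table (pypinyin must be
# installed for lazy_pinyin to work):
#
#     >>> table = {"<unk>": 0, "ni": 1, "hao": 2}
#     >>> token2id(["你好"], table, token_type="lazy_pinyin")
#     [[1, 2]]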
| 29.350254
| 78
| 0.5422
|
import argparse
import codecs
import re
import sys
from typing import List
from pypinyin import lazy_pinyin, pinyin
is_python2 = sys.version_info[0] == 2
def exist_or_not(i, match_pos):
start_pos = None
end_pos = None
for pos in match_pos:
if pos[0] <= i < pos[1]:
start_pos = pos[0]
end_pos = pos[1]
break
return start_pos, end_pos
def get_parser():
parser = argparse.ArgumentParser(
description="convert raw text to tokenized text",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--nchar",
"-n",
default=1,
type=int,
help="number of characters to split, i.e., \
aabb -> a a b b with -n 1 and aa bb with -n 2",
)
parser.add_argument(
"--skip-ncols", "-s", default=0, type=int, help="skip first n columns"
)
parser.add_argument(
"--space", default="<space>", type=str, help="space symbol"
)
parser.add_argument(
"--non-lang-syms",
"-l",
default=None,
type=str,
help="list of non-linguistic symobles, e.g., <NOISE> etc.",
)
parser.add_argument(
"text", type=str, default=False, nargs="?", help="input text"
)
parser.add_argument(
"--trans_type",
"-t",
type=str,
default="char",
choices=["char", "pinyin", "lazy_pinyin"],
help="""Transcript type. char/pinyin/lazy_pinyin""",
)
return parser
def token2id(
texts, token_table, token_type: str = "lazy_pinyin", oov: str = "<unk>"
) -> List[List[int]]:
if texts is None:
raise ValueError("texts can't be None!")
else:
oov_id = token_table[oov]
ids: List[List[int]] = []
for text in texts:
chars_list = list(str(text))
if token_type == "lazy_pinyin":
text = lazy_pinyin(chars_list)
sub_ids = [
token_table[txt] if txt in token_table else oov_id
for txt in text
]
ids.append(sub_ids)
else: # token_type = "pinyin"
text = pinyin(chars_list)
sub_ids = [
token_table[txt[0]] if txt[0] in token_table else oov_id
for txt in text
]
ids.append(sub_ids)
return ids
def main():
parser = get_parser()
args = parser.parse_args()
rs = []
if args.non_lang_syms is not None:
with codecs.open(args.non_lang_syms, "r", encoding="utf-8") as f:
nls = [x.rstrip() for x in f.readlines()]
rs = [re.compile(re.escape(x)) for x in nls]
if args.text:
f = codecs.open(args.text, encoding="utf-8")
else:
f = codecs.getreader("utf-8")(
sys.stdin if is_python2 else sys.stdin.buffer
)
sys.stdout = codecs.getwriter("utf-8")(
sys.stdout if is_python2 else sys.stdout.buffer
)
line = f.readline()
n = args.nchar
while line:
x = line.split()
print(" ".join(x[: args.skip_ncols]), end=" ")
a = " ".join(x[args.skip_ncols :]) # noqa E203
# get all matched positions
match_pos = []
for r in rs:
i = 0
while i >= 0:
m = r.search(a, i)
if m:
match_pos.append([m.start(), m.end()])
i = m.end()
else:
break
if len(match_pos) > 0:
chars = []
i = 0
while i < len(a):
start_pos, end_pos = exist_or_not(i, match_pos)
if start_pos is not None:
chars.append(a[start_pos:end_pos])
i = end_pos
else:
chars.append(a[i])
i += 1
a = chars
if args.trans_type == "pinyin":
a = pinyin(list(str(a)))
a = [one[0] for one in a]
if args.trans_type == "lazy_pinyin":
a = lazy_pinyin(list(str(a)))
a = [a[j : j + n] for j in range(0, len(a), n)] # noqa E203
a_flat = []
for z in a:
a_flat.append("".join(z))
a_chars = [z.replace(" ", args.space) for z in a_flat]
print("".join(a_chars))
line = f.readline()
if __name__ == "__main__":
main()
| true
| true
|
1c463d061e46a0550d594d6f027f9723b5d225f9
| 43
|
py
|
Python
|
streams/rewinder/__init__.py
|
adrn/streams
|
6478d37309ba1dff4e13e8e46b93eafb4ef36431
|
[
"MIT"
] | null | null | null |
streams/rewinder/__init__.py
|
adrn/streams
|
6478d37309ba1dff4e13e8e46b93eafb4ef36431
|
[
"MIT"
] | null | null | null |
streams/rewinder/__init__.py
|
adrn/streams
|
6478d37309ba1dff4e13e8e46b93eafb4ef36431
|
[
"MIT"
] | null | null | null |
from .core import *
from .sampler import *
| 14.333333
| 22
| 0.72093
|
from .core import *
from .sampler import *
| true
| true
|
1c463e35fe5e172b70142ced199c9afc204daeb5
| 662
|
py
|
Python
|
main.py
|
wang-h/backend-app-fastapi-sqlite
|
c155229e7187e381457730a40a9d660c0e98440d
|
[
"MIT"
] | null | null | null |
main.py
|
wang-h/backend-app-fastapi-sqlite
|
c155229e7187e381457730a40a9d660c0e98440d
|
[
"MIT"
] | null | null | null |
main.py
|
wang-h/backend-app-fastapi-sqlite
|
c155229e7187e381457730a40a9d660c0e98440d
|
[
"MIT"
] | null | null | null |
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from app.api.api_v1.api import api_router
from app.core.config import settings
app = FastAPI(
title=settings.PROJECT_NAME,
openapi_url="{}/openapi.json".format(settings.API_V1_STR)
)
# Configure which origins are allowed for cross-origin requests
# Set all CORS enabled origins
if settings.BACKEND_CORS_ORIGINS:
app.add_middleware(
CORSMiddleware,
allow_origins=[str(origin)
for origin in settings.BACKEND_CORS_ORIGINS],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(api_router, prefix=settings.API_V1_STR)
| 26.48
| 68
| 0.712991
|
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from app.api.api_v1.api import api_router
from app.core.config import settings
app = FastAPI(
title=settings.PROJECT_NAME,
openapi_url="{}/openapi.json".format(settings.API_V1_STR)
)
if settings.BACKEND_CORS_ORIGINS:
app.add_middleware(
CORSMiddleware,
allow_origins=[str(origin)
for origin in settings.BACKEND_CORS_ORIGINS],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(api_router, prefix=settings.API_V1_STR)
| true
| true
|
1c463e704757405d935040c3c1db9e5051f1a01b
| 3,160
|
py
|
Python
|
src/python/WMCore/WMRuntime/ScriptInvoke.py
|
vkuznet/WMCore
|
001cc51651052405a7ecd811cde91da611b1dc57
|
[
"Apache-2.0"
] | 21
|
2015-11-19T16:18:45.000Z
|
2021-12-02T18:20:39.000Z
|
src/python/WMCore/WMRuntime/ScriptInvoke.py
|
vkuznet/WMCore
|
001cc51651052405a7ecd811cde91da611b1dc57
|
[
"Apache-2.0"
] | 5,671
|
2015-01-06T14:38:52.000Z
|
2022-03-31T22:11:14.000Z
|
src/python/WMCore/WMRuntime/ScriptInvoke.py
|
vkuznet/WMCore
|
001cc51651052405a7ecd811cde91da611b1dc57
|
[
"Apache-2.0"
] | 67
|
2015-01-21T15:55:38.000Z
|
2022-02-03T19:53:13.000Z
|
#!/usr/bin/env python
"""
_ScriptInvoker_
Util to invoke a Runtime Script and provide it with access to the
various bits of the job that it will need to access via the WMTaskSpace
library
This script will be invoked at runtime from the directory & subshell
environment in which the Runtime Script implementation needs to be called.
"""
from __future__ import print_function
from builtins import object
import logging
import os
import sys
import traceback
import WMCore.WMRuntime.Bootstrap as Bootstrap
from WMCore.WMRuntime.ScriptFactory import getScript
class ScriptInvoke(object):
"""
_ScriptInvoke_
Ctor takes two arguments:
- module name of step module in WMTaskSpace
- module name of the Script implementation to be invoked
"""
def __init__(self, stepModule, scriptModule):
self.stepModule = stepModule
self.module = scriptModule
self.exitCode = 0
self.stepSpace = None
self.script = None
self.step = None
self.task = None
self.job = None
currentDir = os.getcwd()
Bootstrap.setupLogging(currentDir, useStdout=True)
logging.info("Invoking scripts in current directory: %s", currentDir)
def boot(self):
"""
_boot_
Import the Step Module & get the stepSpace object from it.
Get an instance of the Script from the Script Factory
"""
self.job = Bootstrap.loadJobDefinition()
self.task = Bootstrap.loadTask(self.job)
stepSpaceMod = __import__(self.stepModule,
globals(), locals(), ['stepSpace'], 0)
self.stepSpace = stepSpaceMod.stepSpace
self.step = self.task.getStep(self.stepSpace.stepName)
        # use the module name captured in the constructor rather than a __main__ global
        self.script = getScript(self.module)
self.script.task = self.task
self.script.step = self.step
self.script.job = self.job
self.script.stepSpace = self.stepSpace
def invoke(self):
"""
_invoke_
call the Script implementation
"""
self.exitCode = self.script()
def exit(self):
return self.exitCode
if __name__ == '__main__':
try:
stepModule = sys.argv[1]
scriptModule = sys.argv[2]
except Exception as ex:
msg = "Usage: ScriptInvoke.py <Step Module> <Script Module>"
raise RuntimeError(msg)
invoker = ScriptInvoke(stepModule, scriptModule)
try:
invoker.boot()
except Exception as ex:
msg = "Error booting script invoker for step %s\n" % stepModule
msg += "withe Script module: %s\n" % scriptModule
msg += str(ex)
msg += "Details:\n"
for l in traceback.format_tb(sys.exc_info()[2]):
msg += l
raise RuntimeError(msg)
try:
invoker.invoke()
except Exception as ex:
msg = "Error invoking script for step %s\n" % stepModule
msg += "withe Script module: %s\n" % scriptModule
msg += str(ex)
msg += "Details:\n"
for l in traceback.format_tb(sys.exc_info()[2]):
msg += l
raise RuntimeError(msg)
sys.exit(invoker.exit())
| 26.115702
| 77
| 0.631013
|
from __future__ import print_function
from builtins import object
import logging
import os
import sys
import traceback
import WMCore.WMRuntime.Bootstrap as Bootstrap
from WMCore.WMRuntime.ScriptFactory import getScript
class ScriptInvoke(object):
def __init__(self, stepModule, scriptModule):
self.stepModule = stepModule
self.module = scriptModule
self.exitCode = 0
self.stepSpace = None
self.script = None
self.step = None
self.task = None
self.job = None
currentDir = os.getcwd()
Bootstrap.setupLogging(currentDir, useStdout=True)
logging.info("Invoking scripts in current directory: %s", currentDir)
def boot(self):
self.job = Bootstrap.loadJobDefinition()
self.task = Bootstrap.loadTask(self.job)
stepSpaceMod = __import__(self.stepModule,
globals(), locals(), ['stepSpace'], 0)
self.stepSpace = stepSpaceMod.stepSpace
self.step = self.task.getStep(self.stepSpace.stepName)
        self.script = getScript(self.module)
self.script.task = self.task
self.script.step = self.step
self.script.job = self.job
self.script.stepSpace = self.stepSpace
def invoke(self):
self.exitCode = self.script()
def exit(self):
return self.exitCode
if __name__ == '__main__':
try:
stepModule = sys.argv[1]
scriptModule = sys.argv[2]
except Exception as ex:
msg = "Usage: ScriptInvoke.py <Step Module> <Script Module>"
raise RuntimeError(msg)
invoker = ScriptInvoke(stepModule, scriptModule)
try:
invoker.boot()
except Exception as ex:
msg = "Error booting script invoker for step %s\n" % stepModule
msg += "withe Script module: %s\n" % scriptModule
msg += str(ex)
msg += "Details:\n"
for l in traceback.format_tb(sys.exc_info()[2]):
msg += l
raise RuntimeError(msg)
try:
invoker.invoke()
except Exception as ex:
msg = "Error invoking script for step %s\n" % stepModule
msg += "withe Script module: %s\n" % scriptModule
msg += str(ex)
msg += "Details:\n"
for l in traceback.format_tb(sys.exc_info()[2]):
msg += l
raise RuntimeError(msg)
sys.exit(invoker.exit())
| true
| true
|
1c4640c71ced2b43dbfbe2cdd9de56a41d3e64a9
| 100,233
|
py
|
Python
|
superset/views/core.py
|
Altizon/incubator-superset
|
e55fe43ca67a29518674a1a2137a3dbd4f166864
|
[
"Apache-2.0"
] | null | null | null |
superset/views/core.py
|
Altizon/incubator-superset
|
e55fe43ca67a29518674a1a2137a3dbd4f166864
|
[
"Apache-2.0"
] | 5
|
2021-02-02T22:53:35.000Z
|
2022-03-29T22:28:22.000Z
|
superset/views/core.py
|
mhassant/apache-superset-multi-tenancy
|
e55fe43ca67a29518674a1a2137a3dbd4f166864
|
[
"Apache-2.0"
] | 2
|
2017-12-20T02:44:05.000Z
|
2018-02-09T07:19:49.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
import logging
import re
from contextlib import closing
from datetime import datetime, timedelta
from typing import Any, cast, Dict, List, Optional, Union
from urllib import parse
import backoff
import msgpack
import pandas as pd
import pyarrow as pa
import simplejson as json
from flask import abort, flash, g, Markup, redirect, render_template, request, Response
from flask_appbuilder import expose
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import has_access, has_access_api
from flask_appbuilder.security.sqla import models as ab_models
from flask_babel import gettext as __, lazy_gettext as _
from sqlalchemy import and_, Integer, or_, select
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm.session import Session
from werkzeug.urls import Href
import superset.models.core as models
from superset import (
app,
appbuilder,
cache,
conf,
dataframe,
db,
event_logger,
get_feature_flags,
is_feature_enabled,
result_set,
results_backend,
results_backend_use_msgpack,
security_manager,
sql_lab,
talisman,
viz,
)
from superset.connectors.connector_registry import ConnectorRegistry
from superset.connectors.sqla.models import AnnotationDatasource
from superset.constants import RouteMethod
from superset.exceptions import (
DatabaseNotFound,
SupersetException,
SupersetSecurityException,
SupersetTimeoutException,
)
from superset.jinja_context import get_template_processor
from superset.models.dashboard import Dashboard
from superset.models.datasource_access_request import DatasourceAccessRequest
from superset.models.slice import Slice
from superset.models.sql_lab import Query, TabState
from superset.models.user_attributes import UserAttribute
from superset.sql_parse import ParsedQuery
from superset.sql_validators import get_validator_by_name
from superset.utils import core as utils, dashboard_import_export
from superset.utils.dates import now_as_float
from superset.utils.decorators import etag_cache, stats_timing
from superset.views.database.filters import DatabaseFilter
from .base import (
api,
BaseSupersetView,
check_ownership,
common_bootstrap_payload,
CsvResponse,
data_payload_response,
DeleteMixin,
generate_download_headers,
get_error_msg,
get_user_roles,
handle_api_exception,
json_error_response,
json_success,
SupersetModelView,
)
from .utils import (
apply_display_max_row_limit,
bootstrap_user_data,
get_datasource_info,
get_form_data,
get_viz,
)
config = app.config
CACHE_DEFAULT_TIMEOUT = config["CACHE_DEFAULT_TIMEOUT"]
SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT = config["SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT"]
stats_logger = config["STATS_LOGGER"]
DAR = DatasourceAccessRequest
QueryStatus = utils.QueryStatus
logger = logging.getLogger(__name__)
DATABASE_KEYS = [
"allow_csv_upload",
"allow_ctas",
"allow_dml",
"allow_multi_schema_metadata_fetch",
"allow_run_async",
"allows_subquery",
"backend",
"database_name",
"expose_in_sqllab",
"force_ctas_schema",
"id",
]
ALL_DATASOURCE_ACCESS_ERR = __(
"This endpoint requires the `all_datasource_access` permission"
)
DATASOURCE_MISSING_ERR = __("The data source seems to have been deleted")
ACCESS_REQUEST_MISSING_ERR = __("The access requests seem to have been deleted")
USER_MISSING_ERR = __("The user seems to have been deleted")
FORM_DATA_KEY_BLACKLIST: List[str] = []
if not config["ENABLE_JAVASCRIPT_CONTROLS"]:
FORM_DATA_KEY_BLACKLIST = ["js_tooltip", "js_onclick_href", "js_data_mutator"]
def get_database_access_error_msg(database_name):
return __(
"This view requires the database %(name)s or "
"`all_datasource_access` permission",
name=database_name,
)
def is_owner(obj, user):
""" Check if user is owner of the slice """
return obj and user in obj.owners
def check_datasource_perms(
self, datasource_type: Optional[str] = None, datasource_id: Optional[int] = None
) -> None:
"""
Check if user can access a cached response from explore_json.
    This function takes `self` since it must have the same signature as
    the decorated method.
:param datasource_type: The datasource type, i.e., 'druid' or 'table'
:param datasource_id: The datasource ID
:raises SupersetSecurityException: If the user cannot access the resource
"""
form_data = get_form_data()[0]
try:
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data
)
except SupersetException as e:
raise SupersetSecurityException(str(e))
viz_obj = get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=False,
)
security_manager.assert_viz_permission(viz_obj)
def check_slice_perms(self, slice_id):
"""
Check if user can access a cached response from slice_json.
    This function takes `self` since it must have the same signature as
    the decorated method.
"""
form_data, slc = get_form_data(slice_id, use_slice_data=True)
viz_obj = get_viz(
datasource_type=slc.datasource.type,
datasource_id=slc.datasource.id,
form_data=form_data,
force=False,
)
security_manager.assert_viz_permission(viz_obj)
def _deserialize_results_payload(
payload: Union[bytes, str], query, use_msgpack: Optional[bool] = False
) -> dict:
logger.debug(f"Deserializing from msgpack: {use_msgpack}")
if use_msgpack:
with stats_timing(
"sqllab.query.results_backend_msgpack_deserialize", stats_logger
):
ds_payload = msgpack.loads(payload, raw=False)
with stats_timing("sqllab.query.results_backend_pa_deserialize", stats_logger):
pa_table = pa.deserialize(ds_payload["data"])
df = result_set.SupersetResultSet.convert_table_to_df(pa_table)
ds_payload["data"] = dataframe.df_to_records(df) or []
db_engine_spec = query.database.db_engine_spec
all_columns, data, expanded_columns = db_engine_spec.expand_data(
ds_payload["selected_columns"], ds_payload["data"]
)
ds_payload.update(
{"data": data, "columns": all_columns, "expanded_columns": expanded_columns}
)
return ds_payload
else:
with stats_timing(
"sqllab.query.results_backend_json_deserialize", stats_logger
):
return json.loads(payload) # type: ignore
class AccessRequestsModelView(SupersetModelView, DeleteMixin):
datamodel = SQLAInterface(DAR)
include_route_methods = RouteMethod.CRUD_SET
list_columns = [
"username",
"user_roles",
"datasource_link",
"roles_with_datasource",
"created_on",
]
order_columns = ["created_on"]
base_order = ("changed_on", "desc")
label_columns = {
"username": _("User"),
"user_roles": _("User Roles"),
"database": _("Database URL"),
"datasource_link": _("Datasource"),
"roles_with_datasource": _("Roles to grant"),
"created_on": _("Created On"),
}
@talisman(force_https=False)
@app.route("/health")
def health():
return "OK"
@talisman(force_https=False)
@app.route("/healthcheck")
def healthcheck():
return "OK"
@talisman(force_https=False)
@app.route("/ping")
def ping():
return "OK"
class KV(BaseSupersetView):
"""Used for storing and retrieving key value pairs"""
@event_logger.log_this
@has_access_api
@expose("/store/", methods=["POST"])
def store(self):
try:
value = request.form.get("data")
obj = models.KeyValue(value=value)
db.session.add(obj)
db.session.commit()
except Exception as e:
return json_error_response(e)
return Response(json.dumps({"id": obj.id}), status=200)
@event_logger.log_this
@has_access_api
@expose("/<key_id>/", methods=["GET"])
def get_value(self, key_id):
try:
kv = db.session.query(models.KeyValue).filter_by(id=key_id).scalar()
if not kv:
return Response(status=404, content_type="text/plain")
except Exception as e:
return json_error_response(e)
return Response(kv.value, status=200, content_type="text/plain")
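    # Illustrative usage of the KV endpoints (assuming the default Flask-AppBuilder
    # route base derived from the class name, i.e. /kv/ -- verify against your
    # deployment before relying on it):
    #
    #     POST /kv/store/      with form field "data"  -> {"id": <key_id>}
    #     GET  /kv/<key_id>/                            -> stored value as text/plain (404 if missing)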
class R(BaseSupersetView):
"""used for short urls"""
@event_logger.log_this
@expose("/<url_id>")
def index(self, url_id):
url = db.session.query(models.Url).get(url_id)
if url and url.url:
explore_url = "//superset/explore/?"
if url.url.startswith(explore_url):
explore_url += f"r={url_id}"
return redirect(explore_url[1:])
else:
return redirect(url.url[1:])
else:
flash("URL to nowhere...", "danger")
return redirect("/")
@event_logger.log_this
@has_access_api
@expose("/shortner/", methods=["POST"])
def shortner(self):
url = request.form.get("data")
obj = models.Url(url=url)
db.session.add(obj)
db.session.commit()
return Response(
"{scheme}://{request.headers[Host]}/r/{obj.id}".format(
scheme=request.scheme, request=request, obj=obj
),
mimetype="text/plain",
)
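    # Illustrative usage of the short-URL endpoints (route base assumed to be /r/,
    # the Flask-AppBuilder default for this class name):
    #
    #     POST /r/shortner/   with form field "data"  -> "<scheme>://<host>/r/<id>"
    #     GET  /r/<url_id>    redirects to the stored URL (explore URLs get an
    #                         `r=<url_id>` parameter appended instead)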
class Superset(BaseSupersetView):
"""The base views for Superset!"""
logger = logging.getLogger(__name__)
@has_access_api
@expose("/datasources/")
def datasources(self):
datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [o.short_data for o in datasources if o.short_data.get("name")]
datasources = sorted(datasources, key=lambda o: o["name"])
return self.json_response(datasources)
@has_access_api
@expose("/override_role_permissions/", methods=["POST"])
def override_role_permissions(self):
"""Updates the role with the give datasource permissions.
Permissions not in the request will be revoked. This endpoint should
be available to admins only. Expects JSON in the format:
{
'role_name': '{role_name}',
'database': [{
'datasource_type': '{table|druid}',
'name': '{database_name}',
'schema': [{
'name': '{schema_name}',
'datasources': ['{datasource name}, {datasource name}']
}]
}]
}
"""
data = request.get_json(force=True)
role_name = data["role_name"]
databases = data["database"]
db_ds_names = set()
for dbs in databases:
for schema in dbs["schema"]:
for ds_name in schema["datasources"]:
fullname = utils.get_datasource_full_name(
dbs["name"], ds_name, schema=schema["name"]
)
db_ds_names.add(fullname)
existing_datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [d for d in existing_datasources if d.full_name in db_ds_names]
role = security_manager.find_role(role_name)
# remove all permissions
role.permissions = []
# grant permissions to the list of datasources
granted_perms = []
for datasource in datasources:
view_menu_perm = security_manager.find_permission_view_menu(
view_menu_name=datasource.perm, permission_name="datasource_access"
)
# prevent creating empty permissions
if view_menu_perm and view_menu_perm.view_menu:
role.permissions.append(view_menu_perm)
granted_perms.append(view_menu_perm.view_menu.name)
db.session.commit()
return self.json_response(
{"granted": granted_perms, "requested": list(db_ds_names)}, status=201
)
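    # Illustrative response for override_role_permissions (shape taken from the
    # json_response call above): status 201 with
    #     {"granted": ["<view menu name>", ...], "requested": ["<db.schema.table>", ...]}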
@event_logger.log_this
@has_access
@expose("/request_access/")
def request_access(self):
datasources = set()
dashboard_id = request.args.get("dashboard_id")
if dashboard_id:
dash = db.session.query(Dashboard).filter_by(id=int(dashboard_id)).one()
datasources |= dash.datasources
datasource_id = request.args.get("datasource_id")
datasource_type = request.args.get("datasource_type")
if datasource_id:
ds_class = ConnectorRegistry.sources.get(datasource_type)
datasource = (
db.session.query(ds_class).filter_by(id=int(datasource_id)).one()
)
datasources.add(datasource)
has_access = all(
(
datasource and security_manager.datasource_access(datasource)
for datasource in datasources
)
)
if has_access:
return redirect("/superset/dashboard/{}".format(dashboard_id))
if request.args.get("action") == "go":
for datasource in datasources:
access_request = DAR(
datasource_id=datasource.id, datasource_type=datasource.type
)
db.session.add(access_request)
db.session.commit()
flash(__("Access was requested"), "info")
return redirect("/")
return self.render_template(
"superset/request_access.html",
datasources=datasources,
datasource_names=", ".join([o.name for o in datasources]),
)
@event_logger.log_this
@has_access
@expose("/approve")
def approve(self):
def clean_fulfilled_requests(session):
for r in session.query(DAR).all():
datasource = ConnectorRegistry.get_datasource(
r.datasource_type, r.datasource_id, session
)
if not datasource or security_manager.datasource_access(datasource):
                    # datasource no longer exists or access has already been granted
session.delete(r)
session.commit()
datasource_type = request.args.get("datasource_type")
datasource_id = request.args.get("datasource_id")
created_by_username = request.args.get("created_by")
role_to_grant = request.args.get("role_to_grant")
role_to_extend = request.args.get("role_to_extend")
session = db.session
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, session
)
if not datasource:
flash(DATASOURCE_MISSING_ERR, "alert")
return json_error_response(DATASOURCE_MISSING_ERR)
requested_by = security_manager.find_user(username=created_by_username)
if not requested_by:
flash(USER_MISSING_ERR, "alert")
return json_error_response(USER_MISSING_ERR)
requests = (
session.query(DAR)
.filter(
DAR.datasource_id == datasource_id,
DAR.datasource_type == datasource_type,
DAR.created_by_fk == requested_by.id,
)
.all()
)
if not requests:
flash(ACCESS_REQUEST_MISSING_ERR, "alert")
return json_error_response(ACCESS_REQUEST_MISSING_ERR)
# check if you can approve
if security_manager.all_datasource_access() or check_ownership(
datasource, raise_if_false=False
):
            # can be done by Admin only
if role_to_grant:
role = security_manager.find_role(role_to_grant)
requested_by.roles.append(role)
msg = __(
"%(user)s was granted the role %(role)s that gives access "
"to the %(datasource)s",
user=requested_by.username,
role=role_to_grant,
datasource=datasource.full_name,
)
utils.notify_user_about_perm_udate(
g.user,
requested_by,
role,
datasource,
"email/role_granted.txt",
app.config,
)
flash(msg, "info")
if role_to_extend:
perm_view = security_manager.find_permission_view_menu(
"email/datasource_access", datasource.perm
)
role = security_manager.find_role(role_to_extend)
security_manager.add_permission_role(role, perm_view)
msg = __(
"Role %(r)s was extended to provide the access to "
"the datasource %(ds)s",
r=role_to_extend,
ds=datasource.full_name,
)
utils.notify_user_about_perm_udate(
g.user,
requested_by,
role,
datasource,
"email/role_extended.txt",
app.config,
)
flash(msg, "info")
clean_fulfilled_requests(session)
else:
flash(__("You have no permission to approve this request"), "danger")
return redirect("/accessrequestsmodelview/list/")
for r in requests:
session.delete(r)
session.commit()
return redirect("/accessrequestsmodelview/list/")
def get_viz(
self,
slice_id=None,
form_data=None,
datasource_type=None,
datasource_id=None,
force=False,
):
if slice_id:
slc = db.session.query(Slice).filter_by(id=slice_id).one()
return slc.get_viz()
else:
viz_type = form_data.get("viz_type", "table")
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session
)
viz_obj = viz.viz_types[viz_type](
datasource, form_data=form_data, force=force
)
return viz_obj
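    # Note (illustrative): when `slice_id` is given, the stored chart's own viz object
    # is returned via `slc.get_viz()`; otherwise a viz is built from `form_data`,
    # defaulting to the "table" viz_type, e.g. (placeholder values):
    #
    #     viz_obj = self.get_viz(
    #         datasource_type="table", datasource_id=1,
    #         form_data={"viz_type": "table"}, force=False,
    #     )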
@has_access
@expose("/slice/<slice_id>/")
def slice(self, slice_id):
form_data, slc = get_form_data(slice_id, use_slice_data=True)
if not slc:
abort(404)
endpoint = "/superset/explore/?form_data={}".format(
parse.quote(json.dumps({"slice_id": slice_id}))
)
param = utils.ReservedUrlParameters.STANDALONE.value
if request.args.get(param) == "true":
endpoint += f"&{param}=true"
return redirect(endpoint)
def get_query_string_response(self, viz_obj):
query = None
try:
query_obj = viz_obj.query_obj()
if query_obj:
query = viz_obj.datasource.get_query_str(query_obj)
except Exception as e:
logger.exception(e)
return json_error_response(e)
if not query:
query = "No query."
return self.json_response(
{"query": query, "language": viz_obj.datasource.query_language}
)
def get_raw_results(self, viz_obj):
return self.json_response(
{"data": viz_obj.get_df_payload()["df"].to_dict("records")}
)
def get_samples(self, viz_obj):
return self.json_response({"data": viz_obj.get_samples()})
def generate_json(
self, viz_obj, csv=False, query=False, results=False, samples=False
):
if csv:
return CsvResponse(
viz_obj.get_csv(),
status=200,
headers=generate_download_headers("csv"),
mimetype="application/csv",
)
if query:
return self.get_query_string_response(viz_obj)
if results:
return self.get_raw_results(viz_obj)
if samples:
return self.get_samples(viz_obj)
payload = viz_obj.get_payload()
return data_payload_response(*viz_obj.payload_json_and_has_error(payload))
@event_logger.log_this
@api
@has_access_api
@expose("/slice_json/<slice_id>")
@etag_cache(CACHE_DEFAULT_TIMEOUT, check_perms=check_slice_perms)
def slice_json(self, slice_id):
form_data, slc = get_form_data(slice_id, use_slice_data=True)
datasource_type = slc.datasource.type
datasource_id = slc.datasource.id
viz_obj = get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=False,
)
return self.generate_json(viz_obj)
@event_logger.log_this
@api
@has_access_api
@expose("/annotation_json/<layer_id>")
def annotation_json(self, layer_id):
form_data = get_form_data()[0]
form_data["layer_id"] = layer_id
form_data["filters"] = [{"col": "layer_id", "op": "==", "val": layer_id}]
datasource = AnnotationDatasource()
viz_obj = viz.viz_types["table"](datasource, form_data=form_data, force=False)
payload = viz_obj.get_payload()
return data_payload_response(*viz_obj.payload_json_and_has_error(payload))
EXPLORE_JSON_METHODS = ["POST"]
if not is_feature_enabled("ENABLE_EXPLORE_JSON_CSRF_PROTECTION"):
EXPLORE_JSON_METHODS.append("GET")
@event_logger.log_this
@api
@has_access_api
@handle_api_exception
@expose(
"/explore_json/<datasource_type>/<datasource_id>/", methods=EXPLORE_JSON_METHODS
)
@expose("/explore_json/", methods=EXPLORE_JSON_METHODS)
@etag_cache(CACHE_DEFAULT_TIMEOUT, check_perms=check_datasource_perms)
def explore_json(self, datasource_type=None, datasource_id=None):
"""Serves all request that GET or POST form_data
This endpoint evolved to be the entry point of many different
requests that GETs or POSTs a form_data.
`self.generate_json` receives this input and returns different
payloads based on the request args in the first block
TODO: break into one endpoint for each return shape"""
csv = request.args.get("csv") == "true"
query = request.args.get("query") == "true"
results = request.args.get("results") == "true"
samples = request.args.get("samples") == "true"
force = request.args.get("force") == "true"
form_data = get_form_data()[0]
try:
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data
)
except SupersetException as e:
return json_error_response(utils.error_msg_from_exception(e))
viz_obj = get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=force,
)
return self.generate_json(
viz_obj, csv=csv, query=query, results=results, samples=samples
)
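    # Illustrative usage: the boolean query args select the response shape, e.g.
    #
    #     POST /superset/explore_json/?csv=true      -> CSV download
    #     POST /superset/explore_json/?query=true    -> the generated query string
    #     POST /superset/explore_json/?results=true  -> raw dataframe records
    #     POST /superset/explore_json/?samples=true  -> sample rows
    #
    # With none of these set, the full viz payload is returned; `force=true` bypasses
    # the cache. The form_data itself is read from the request via `get_form_data()`.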
@event_logger.log_this
@has_access
@expose("/import_dashboards", methods=["GET", "POST"])
def import_dashboards(self):
"""Overrides the dashboards using json instances from the file."""
f = request.files.get("file")
if request.method == "POST" and f:
try:
dashboard_import_export.import_dashboards(db.session, f.stream)
except DatabaseNotFound as e:
flash(
_(
"Cannot import dashboard: %(db_error)s.\n"
"Make sure to create the database before "
"importing the dashboard.",
db_error=e,
),
"danger",
)
except Exception as e:
logger.exception(e)
flash(
_(
"An unknown error occurred. "
"Please contact your Superset administrator"
),
"danger",
)
return redirect("/dashboard/list/")
return self.render_template("superset/import_dashboards.html")
@event_logger.log_this
@has_access
@expose("/explore/<datasource_type>/<datasource_id>/", methods=["GET", "POST"])
@expose("/explore/", methods=["GET", "POST"])
def explore(self, datasource_type=None, datasource_id=None):
user_id = g.user.get_id() if g.user else None
form_data, slc = get_form_data(use_slice_data=True)
# Flash the SIP-15 message if the slice is owned by the current user and has not
# been updated, i.e., is not using the [start, end) interval.
if (
config["SIP_15_ENABLED"]
and slc
and g.user in slc.owners
and (
not form_data.get("time_range_endpoints")
or form_data["time_range_endpoints"]
!= (
utils.TimeRangeEndpoint.INCLUSIVE,
utils.TimeRangeEndpoint.EXCLUSIVE,
)
)
):
url = Href("/superset/explore/")(
{
"form_data": json.dumps(
{
"slice_id": slc.id,
"time_range_endpoints": (
utils.TimeRangeEndpoint.INCLUSIVE.value,
utils.TimeRangeEndpoint.EXCLUSIVE.value,
),
}
)
}
)
flash(Markup(config["SIP_15_TOAST_MESSAGE"].format(url=url)))
error_redirect = "/chart/list/"
try:
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data
)
except SupersetException:
return redirect(error_redirect)
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session
)
if not datasource:
flash(DATASOURCE_MISSING_ERR, "danger")
return redirect(error_redirect)
if config["ENABLE_ACCESS_REQUEST"] and (
not security_manager.datasource_access(datasource)
):
flash(
__(security_manager.get_datasource_access_error_msg(datasource)),
"danger",
)
return redirect(
"superset/request_access/?"
f"datasource_type={datasource_type}&"
f"datasource_id={datasource_id}&"
)
viz_type = form_data.get("viz_type")
if not viz_type and datasource.default_endpoint:
return redirect(datasource.default_endpoint)
# slc perms
slice_add_perm = security_manager.can_access("can_add", "SliceModelView")
slice_overwrite_perm = is_owner(slc, g.user)
slice_download_perm = security_manager.can_access(
"can_download", "SliceModelView"
)
form_data["datasource"] = str(datasource_id) + "__" + datasource_type
# On explore, merge legacy and extra filters into the form data
utils.convert_legacy_filters_into_adhoc(form_data)
utils.merge_extra_filters(form_data)
# merge request url params
if request.method == "GET":
utils.merge_request_params(form_data, request.args)
# handle save or overwrite
action = request.args.get("action")
if action == "overwrite" and not slice_overwrite_perm:
return json_error_response(
_("You don't have the rights to ") + _("alter this ") + _("chart"),
status=400,
)
if action == "saveas" and not slice_add_perm:
return json_error_response(
_("You don't have the rights to ") + _("create a ") + _("chart"),
status=400,
)
if action in ("saveas", "overwrite"):
return self.save_or_overwrite_slice(
request.args,
slc,
slice_add_perm,
slice_overwrite_perm,
slice_download_perm,
datasource_id,
datasource_type,
datasource.name,
)
standalone = (
request.args.get(utils.ReservedUrlParameters.STANDALONE.value) == "true"
)
bootstrap_data = {
"can_add": slice_add_perm,
"can_download": slice_download_perm,
"can_overwrite": slice_overwrite_perm,
"datasource": datasource.data,
"form_data": form_data,
"datasource_id": datasource_id,
"datasource_type": datasource_type,
"slice": slc.data if slc else None,
"standalone": standalone,
"user_id": user_id,
"forced_height": request.args.get("height"),
"common": common_bootstrap_payload(),
}
table_name = (
datasource.table_name
if datasource_type == "table"
else datasource.datasource_name
)
if slc:
title = slc.slice_name
else:
title = _("Explore - %(table)s", table=table_name)
return self.render_template(
"superset/basic.html",
bootstrap_data=json.dumps(
bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser
),
entry="explore",
title=title,
standalone_mode=standalone,
)
@api
@handle_api_exception
@has_access_api
@expose("/filter/<datasource_type>/<datasource_id>/<column>/")
def filter(self, datasource_type, datasource_id, column):
"""
Endpoint to retrieve values for specified column.
:param datasource_type: Type of datasource e.g. table
:param datasource_id: Datasource id
:param column: Column name to retrieve values for
:return:
"""
# TODO: Cache endpoint by user, datasource and column
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session
)
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
security_manager.assert_datasource_permission(datasource)
payload = json.dumps(
datasource.values_for_column(column, config["FILTER_SELECT_ROW_LIMIT"]),
default=utils.json_int_dttm_ser,
)
return json_success(payload)
def save_or_overwrite_slice(
self,
args,
slc,
slice_add_perm,
slice_overwrite_perm,
slice_download_perm,
datasource_id,
datasource_type,
datasource_name,
):
"""Save or overwrite a slice"""
slice_name = args.get("slice_name")
action = args.get("action")
form_data = get_form_data()[0]
if action in ("saveas"):
if "slice_id" in form_data:
form_data.pop("slice_id") # don't save old slice_id
slc = Slice(owners=[g.user] if g.user else [])
slc.params = json.dumps(form_data, indent=2, sort_keys=True)
slc.datasource_name = datasource_name
slc.viz_type = form_data["viz_type"]
slc.datasource_type = datasource_type
slc.datasource_id = datasource_id
slc.slice_name = slice_name
if action in ("saveas") and slice_add_perm:
self.save_slice(slc)
elif action == "overwrite" and slice_overwrite_perm:
self.overwrite_slice(slc)
# Adding slice to a dashboard if requested
dash = None
if request.args.get("add_to_dash") == "existing":
dash = (
db.session.query(Dashboard)
.filter_by(id=int(request.args.get("save_to_dashboard_id")))
.one()
)
# check edit dashboard permissions
dash_overwrite_perm = check_ownership(dash, raise_if_false=False)
if not dash_overwrite_perm:
return json_error_response(
_("You don't have the rights to ")
+ _("alter this ")
+ _("dashboard"),
status=400,
)
flash(
_("Chart [{}] was added to dashboard [{}]").format(
slc.slice_name, dash.dashboard_title
),
"info",
)
elif request.args.get("add_to_dash") == "new":
# check create dashboard permissions
dash_add_perm = security_manager.can_access("can_add", "DashboardModelView")
if not dash_add_perm:
return json_error_response(
_("You don't have the rights to ")
+ _("create a ")
+ _("dashboard"),
status=400,
)
dash = Dashboard(
dashboard_title=request.args.get("new_dashboard_name"),
owners=[g.user] if g.user else [],
)
flash(
_(
"Dashboard [{}] just got created and chart [{}] was added " "to it"
).format(dash.dashboard_title, slc.slice_name),
"info",
)
if dash and slc not in dash.slices:
dash.slices.append(slc)
db.session.commit()
response = {
"can_add": slice_add_perm,
"can_download": slice_download_perm,
"can_overwrite": is_owner(slc, g.user),
"form_data": slc.form_data,
"slice": slc.data,
"dashboard_id": dash.id if dash else None,
}
if request.args.get("goto_dash") == "true":
response.update({"dashboard": dash.url})
return json_success(json.dumps(response))
def save_slice(self, slc):
session = db.session()
msg = _("Chart [{}] has been saved").format(slc.slice_name)
session.add(slc)
session.commit()
flash(msg, "info")
def overwrite_slice(self, slc):
session = db.session()
session.merge(slc)
session.commit()
msg = _("Chart [{}] has been overwritten").format(slc.slice_name)
flash(msg, "info")
@api
@has_access_api
@expose("/schemas/<db_id>/")
@expose("/schemas/<db_id>/<force_refresh>/")
def schemas(self, db_id, force_refresh="false"):
db_id = int(db_id)
force_refresh = force_refresh.lower() == "true"
database = db.session.query(models.Database).get(db_id)
if database:
schemas = database.get_all_schema_names(
cache=database.schema_cache_enabled,
cache_timeout=database.schema_cache_timeout,
force=force_refresh,
)
schemas = security_manager.schemas_accessible_by_user(database, schemas)
else:
schemas = []
return Response(json.dumps({"schemas": schemas}), mimetype="application/json")
@api
@has_access_api
@expose("/tables/<int:db_id>/<schema>/<substr>/")
@expose("/tables/<int:db_id>/<schema>/<substr>/<force_refresh>/")
def tables(
self, db_id: int, schema: str, substr: str, force_refresh: str = "false"
):
"""Endpoint to fetch the list of tables for given database"""
# Guarantees database filtering by security access
query = db.session.query(models.Database)
query = DatabaseFilter("id", SQLAInterface(models.Database, db.session)).apply(
query, None
)
database = query.filter_by(id=db_id).one_or_none()
if not database:
return json_error_response("Not found", 404)
force_refresh_parsed = force_refresh.lower() == "true"
schema_parsed = utils.parse_js_uri_path_item(schema, eval_undefined=True)
substr_parsed = utils.parse_js_uri_path_item(substr, eval_undefined=True)
if schema_parsed:
tables = (
database.get_all_table_names_in_schema(
schema=schema_parsed,
force=force_refresh_parsed,
cache=database.table_cache_enabled,
cache_timeout=database.table_cache_timeout,
)
or []
)
views = (
database.get_all_view_names_in_schema(
schema=schema_parsed,
force=force_refresh_parsed,
cache=database.table_cache_enabled,
cache_timeout=database.table_cache_timeout,
)
or []
)
else:
tables = database.get_all_table_names_in_database(
cache=True, force=False, cache_timeout=24 * 60 * 60
)
views = database.get_all_view_names_in_database(
cache=True, force=False, cache_timeout=24 * 60 * 60
)
tables = security_manager.get_datasources_accessible_by_user(
database, tables, schema_parsed
)
views = security_manager.get_datasources_accessible_by_user(
database, views, schema_parsed
)
def get_datasource_label(ds_name: utils.DatasourceName) -> str:
return (
ds_name.table if schema_parsed else f"{ds_name.schema}.{ds_name.table}"
)
if substr_parsed:
tables = [tn for tn in tables if substr_parsed in get_datasource_label(tn)]
views = [vn for vn in views if substr_parsed in get_datasource_label(vn)]
if not schema_parsed and database.default_schemas:
user_schema = g.user.email.split("@")[0]
valid_schemas = set(database.default_schemas + [user_schema])
tables = [tn for tn in tables if tn.schema in valid_schemas]
views = [vn for vn in views if vn.schema in valid_schemas]
max_items = config["MAX_TABLE_NAMES"] or len(tables)
total_items = len(tables) + len(views)
max_tables = len(tables)
max_views = len(views)
if total_items and substr_parsed:
max_tables = max_items * len(tables) // total_items
max_views = max_items * len(views) // total_items
table_options = [
{
"value": tn.table,
"schema": tn.schema,
"label": get_datasource_label(tn),
"title": get_datasource_label(tn),
"type": "table",
}
for tn in tables[:max_tables]
]
table_options.extend(
[
{
"value": vn.table,
"schema": vn.schema,
"label": get_datasource_label(vn),
"title": get_datasource_label(vn),
"type": "view",
}
for vn in views[:max_views]
]
)
table_options.sort(key=lambda value: value["label"])
payload = {"tableLength": len(tables) + len(views), "options": table_options}
return json_success(json.dumps(payload))
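    # Illustrative response shape (inferred from the payload built above; values
    # are hypothetical):
    #
    #     {
    #         "tableLength": 42,
    #         "options": [
    #             {"value": "my_table", "schema": "public", "label": "my_table",
    #              "title": "my_table", "type": "table"},
    #             ...
    #         ],
    #     }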
@api
@has_access_api
@expose("/copy_dash/<dashboard_id>/", methods=["GET", "POST"])
def copy_dash(self, dashboard_id):
"""Copy dashboard"""
session = db.session()
data = json.loads(request.form.get("data"))
dash = models.Dashboard()
original_dash = session.query(Dashboard).get(dashboard_id)
dash.owners = [g.user] if g.user else []
dash.dashboard_title = data["dashboard_title"]
if data["duplicate_slices"]:
# Duplicating slices as well, mapping old ids to new ones
old_to_new_sliceids = {}
for slc in original_dash.slices:
new_slice = slc.clone()
new_slice.owners = [g.user] if g.user else []
session.add(new_slice)
session.flush()
new_slice.dashboards.append(dash)
old_to_new_sliceids["{}".format(slc.id)] = "{}".format(new_slice.id)
# update chartId of layout entities
            # in v2 dashboard position JSON data, chartId should be an integer,
            # while in older versions slice_id was stored as a string
for value in data["positions"].values():
if (
isinstance(value, dict)
and value.get("meta")
and value.get("meta").get("chartId")
):
old_id = "{}".format(value.get("meta").get("chartId"))
new_id = int(old_to_new_sliceids[old_id])
value["meta"]["chartId"] = new_id
else:
dash.slices = original_dash.slices
dash.params = original_dash.params
self._set_dash_metadata(dash, data)
session.add(dash)
session.commit()
dash_json = json.dumps(dash.data)
session.close()
return json_success(dash_json)
@api
@has_access_api
@expose("/save_dash/<dashboard_id>/", methods=["GET", "POST"])
def save_dash(self, dashboard_id):
"""Save a dashboard's metadata"""
session = db.session()
dash = session.query(Dashboard).get(dashboard_id)
check_ownership(dash, raise_if_false=True)
data = json.loads(request.form.get("data"))
self._set_dash_metadata(dash, data)
session.merge(dash)
session.commit()
session.close()
return json_success(json.dumps({"status": "SUCCESS"}))
@staticmethod
def _set_dash_metadata(dashboard, data):
positions = data["positions"]
# find slices in the position data
slice_ids = []
slice_id_to_name = {}
for value in positions.values():
if isinstance(value, dict):
try:
slice_id = value["meta"]["chartId"]
slice_ids.append(slice_id)
slice_id_to_name[slice_id] = value["meta"]["sliceName"]
except KeyError:
pass
session = db.session()
current_slices = session.query(Slice).filter(Slice.id.in_(slice_ids)).all()
dashboard.slices = current_slices
        # update slice names. this assumes the user has permission to update the slice
        # we allow the user to set the slice name to an empty string
for slc in dashboard.slices:
try:
new_name = slice_id_to_name[slc.id]
if slc.slice_name != new_name:
slc.slice_name = new_name
session.merge(slc)
session.flush()
except KeyError:
pass
# remove leading and trailing white spaces in the dumped json
dashboard.position_json = json.dumps(
positions, indent=None, separators=(",", ":"), sort_keys=True
)
md = dashboard.params_dict
dashboard.css = data.get("css")
dashboard.dashboard_title = data["dashboard_title"]
if "timed_refresh_immune_slices" not in md:
md["timed_refresh_immune_slices"] = []
if "filter_scopes" in data:
md["filter_scopes"] = json.loads(data["filter_scopes"] or "{}")
md["expanded_slices"] = data["expanded_slices"]
md["refresh_frequency"] = data.get("refresh_frequency", 0)
default_filters_data = json.loads(data.get("default_filters", "{}"))
applicable_filters = {
key: v for key, v in default_filters_data.items() if int(key) in slice_ids
}
md["default_filters"] = json.dumps(applicable_filters)
if data.get("color_namespace"):
md["color_namespace"] = data.get("color_namespace")
if data.get("color_scheme"):
md["color_scheme"] = data.get("color_scheme")
if data.get("label_colors"):
md["label_colors"] = data.get("label_colors")
dashboard.json_metadata = json.dumps(md)
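    # Illustrative shape of a `positions` entry consumed above; only dict values
    # carrying a chart are used (keys from the code, values hypothetical):
    #
    #     "CHART-abc123": {
    #         "type": "CHART",
    #         "meta": {"chartId": 123, "sliceName": "My chart"},
    #         ...
    #     }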
@api
@has_access_api
@expose("/add_slices/<dashboard_id>/", methods=["POST"])
def add_slices(self, dashboard_id):
"""Add and save slices to a dashboard"""
data = json.loads(request.form.get("data"))
session = db.session()
dash = session.query(Dashboard).get(dashboard_id)
check_ownership(dash, raise_if_false=True)
new_slices = session.query(Slice).filter(Slice.id.in_(data["slice_ids"]))
dash.slices += new_slices
session.merge(dash)
session.commit()
session.close()
return "SLICES ADDED"
@api
@has_access_api
@expose("/testconn", methods=["POST", "GET"])
def testconn(self):
"""Tests a sqla connection"""
try:
db_name = request.json.get("name")
uri = request.json.get("uri")
            # if the database already exists in Superset, only its safe (password-masked) URI
# would be shown in the UI and would be passed in the form data.
# so if the database already exists and the form was submitted with the safe URI,
# we assume we should retrieve the decrypted URI to test the connection.
if db_name:
existing_database = (
db.session.query(models.Database)
.filter_by(database_name=db_name)
.one_or_none()
)
if existing_database and uri == existing_database.safe_sqlalchemy_uri():
uri = existing_database.sqlalchemy_uri_decrypted
# this is the database instance that will be tested
database = models.Database(
# extras is sent as json, but required to be a string in the Database model
extra=json.dumps(request.json.get("extras", {})),
impersonate_user=request.json.get("impersonate_user"),
encrypted_extra=json.dumps(request.json.get("encrypted_extra", {})),
)
database.set_sqlalchemy_uri(uri)
username = g.user.username if g.user is not None else None
engine = database.get_sqla_engine(user_name=username)
with closing(engine.connect()) as conn:
conn.scalar(select([1]))
return json_success('"OK"')
except Exception as e:
logger.exception(e)
return json_error_response(
"Connection failed!\n\n" f"The error message returned was:\n{e}", 400
)
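    # Illustrative request body for /superset/testconn (keys taken from the
    # `request.json.get` calls above; values are hypothetical):
    #
    #     {
    #         "name": "examples",
    #         "uri": "postgresql://user:***@host:5432/db",
    #         "extras": {},
    #         "impersonate_user": false,
    #         "encrypted_extra": {}
    #     }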
@api
@has_access_api
@expose("/recent_activity/<user_id>/", methods=["GET"])
def recent_activity(self, user_id):
"""Recent activity (actions) for a given user"""
M = models
if request.args.get("limit"):
limit = int(request.args.get("limit"))
else:
limit = 1000
qry = (
db.session.query(M.Log, M.Dashboard, Slice)
.outerjoin(M.Dashboard, M.Dashboard.id == M.Log.dashboard_id)
.outerjoin(Slice, Slice.id == M.Log.slice_id)
.filter(
and_(
~M.Log.action.in_(("queries", "shortner", "sql_json")),
M.Log.user_id == user_id,
)
)
.order_by(M.Log.dttm.desc())
.limit(limit)
)
payload = []
for log in qry.all():
item_url = None
item_title = None
if log.Dashboard:
item_url = log.Dashboard.url
item_title = log.Dashboard.dashboard_title
elif log.Slice:
item_url = log.Slice.slice_url
item_title = log.Slice.slice_name
payload.append(
{
"action": log.Log.action,
"item_url": item_url,
"item_title": item_title,
"time": log.Log.dttm,
}
)
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/csrf_token/", methods=["GET"])
def csrf_token(self):
return Response(
self.render_template("superset/csrf_token.json"), mimetype="text/json"
)
@api
@has_access_api
@expose("/available_domains/", methods=["GET"])
def available_domains(self):
"""
Returns the list of available Superset Webserver domains (if any)
defined in config. This enables charts embedded in other apps to
leverage domain sharding if appropriately configured.
"""
return Response(
json.dumps(conf.get("SUPERSET_WEBSERVER_DOMAINS")), mimetype="text/json"
)
@api
@has_access_api
@expose("/fave_dashboards_by_username/<username>/", methods=["GET"])
def fave_dashboards_by_username(self, username):
"""This lets us use a user's username to pull favourite dashboards"""
user = security_manager.find_user(username=username)
return self.fave_dashboards(user.get_id())
@api
@has_access_api
@expose("/fave_dashboards/<user_id>/", methods=["GET"])
def fave_dashboards(self, user_id):
qry = (
db.session.query(Dashboard, models.FavStar.dttm)
.join(
models.FavStar,
and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == "Dashboard",
Dashboard.id == models.FavStar.obj_id,
),
)
.order_by(models.FavStar.dttm.desc())
)
payload = []
for o in qry.all():
d = {
"id": o.Dashboard.id,
"dashboard": o.Dashboard.dashboard_link(),
"title": o.Dashboard.dashboard_title,
"url": o.Dashboard.url,
"dttm": o.dttm,
}
if o.Dashboard.created_by:
user = o.Dashboard.created_by
d["creator"] = str(user)
d["creator_url"] = "/superset/profile/{}/".format(user.username)
payload.append(d)
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/created_dashboards/<user_id>/", methods=["GET"])
def created_dashboards(self, user_id):
Dash = Dashboard
qry = (
db.session.query(Dash)
.filter(or_(Dash.created_by_fk == user_id, Dash.changed_by_fk == user_id))
.order_by(Dash.changed_on.desc())
)
payload = [
{
"id": o.id,
"dashboard": o.dashboard_link(),
"title": o.dashboard_title,
"url": o.url,
"dttm": o.changed_on,
}
for o in qry.all()
]
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/user_slices", methods=["GET"])
@expose("/user_slices/<user_id>/", methods=["GET"])
def user_slices(self, user_id=None):
"""List of slices a user created, or faved"""
if not user_id:
user_id = g.user.id
FavStar = models.FavStar
qry = (
db.session.query(Slice, FavStar.dttm)
.join(
models.FavStar,
and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == "slice",
Slice.id == models.FavStar.obj_id,
),
isouter=True,
)
.filter(
or_(
Slice.created_by_fk == user_id,
Slice.changed_by_fk == user_id,
FavStar.user_id == user_id,
)
)
.order_by(Slice.slice_name.asc())
)
payload = [
{
"id": o.Slice.id,
"title": o.Slice.slice_name,
"url": o.Slice.slice_url,
"data": o.Slice.form_data,
"dttm": o.dttm if o.dttm else o.Slice.changed_on,
"viz_type": o.Slice.viz_type,
}
for o in qry.all()
]
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/created_slices", methods=["GET"])
@expose("/created_slices/<user_id>/", methods=["GET"])
def created_slices(self, user_id=None):
"""List of slices created by this user"""
if not user_id:
user_id = g.user.id
qry = (
db.session.query(Slice)
.filter(or_(Slice.created_by_fk == user_id, Slice.changed_by_fk == user_id))
.order_by(Slice.changed_on.desc())
)
payload = [
{
"id": o.id,
"title": o.slice_name,
"url": o.slice_url,
"dttm": o.changed_on,
"viz_type": o.viz_type,
}
for o in qry.all()
]
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/fave_slices", methods=["GET"])
@expose("/fave_slices/<user_id>/", methods=["GET"])
def fave_slices(self, user_id=None):
"""Favorite slices for a user"""
if not user_id:
user_id = g.user.id
qry = (
db.session.query(Slice, models.FavStar.dttm)
.join(
models.FavStar,
and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == "slice",
Slice.id == models.FavStar.obj_id,
),
)
.order_by(models.FavStar.dttm.desc())
)
payload = []
for o in qry.all():
d = {
"id": o.Slice.id,
"title": o.Slice.slice_name,
"url": o.Slice.slice_url,
"dttm": o.dttm,
"viz_type": o.Slice.viz_type,
}
if o.Slice.created_by:
user = o.Slice.created_by
d["creator"] = str(user)
d["creator_url"] = "/superset/profile/{}/".format(user.username)
payload.append(d)
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/warm_up_cache/", methods=["GET"])
def warm_up_cache(self):
"""Warms up the cache for the slice or table.
Note for slices a force refresh occurs.
"""
slices = None
session = db.session()
slice_id = request.args.get("slice_id")
table_name = request.args.get("table_name")
db_name = request.args.get("db_name")
if not slice_id and not (table_name and db_name):
return json_error_response(
__(
"Malformed request. slice_id or table_name and db_name "
"arguments are expected"
),
status=400,
)
if slice_id:
slices = session.query(Slice).filter_by(id=slice_id).all()
if not slices:
return json_error_response(
__("Chart %(id)s not found", id=slice_id), status=404
)
elif table_name and db_name:
SqlaTable = ConnectorRegistry.sources["table"]
table = (
session.query(SqlaTable)
.join(models.Database)
.filter(
                    models.Database.database_name == db_name,
                    SqlaTable.table_name == table_name,
)
).one_or_none()
if not table:
return json_error_response(
__(
"Table %(t)s wasn't found in the database %(d)s",
t=table_name,
                        d=db_name,
),
status=404,
)
slices = (
session.query(Slice)
.filter_by(datasource_id=table.id, datasource_type=table.type)
.all()
)
for slc in slices:
try:
form_data = get_form_data(slc.id, use_slice_data=True)[0]
obj = get_viz(
datasource_type=slc.datasource.type,
datasource_id=slc.datasource.id,
form_data=form_data,
force=True,
)
obj.get_json()
except Exception as e:
logger.exception("Failed to warm up cache")
return json_error_response(utils.error_msg_from_exception(e))
return json_success(
json.dumps(
[{"slice_id": slc.id, "slice_name": slc.slice_name} for slc in slices]
)
)
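    # Illustrative usage (arguments as read above; values hypothetical):
    #
    #     GET /superset/warm_up_cache/?slice_id=123
    #     GET /superset/warm_up_cache/?table_name=my_table&db_name=examples
    #
    # On success the response is a JSON list of {"slice_id", "slice_name"} entries,
    # one per chart whose cache was refreshed.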
@has_access_api
@expose("/favstar/<class_name>/<obj_id>/<action>/")
def favstar(self, class_name, obj_id, action):
"""Toggle favorite stars on Slices and Dashboard"""
session = db.session()
FavStar = models.FavStar
count = 0
favs = (
session.query(FavStar)
.filter_by(class_name=class_name, obj_id=obj_id, user_id=g.user.get_id())
.all()
)
if action == "select":
if not favs:
session.add(
FavStar(
class_name=class_name,
obj_id=obj_id,
user_id=g.user.get_id(),
dttm=datetime.now(),
)
)
count = 1
elif action == "unselect":
for fav in favs:
session.delete(fav)
else:
count = len(favs)
session.commit()
return json_success(json.dumps({"count": count}))
@api
@has_access_api
@expose("/dashboard/<dashboard_id>/published/", methods=("GET", "POST"))
def publish(self, dashboard_id):
"""Gets and toggles published status on dashboards"""
logger.warning(
"This API endpoint is deprecated and will be removed in version 1.0.0"
)
session = db.session()
Role = ab_models.Role
dash = (
session.query(Dashboard).filter(Dashboard.id == dashboard_id).one_or_none()
)
admin_role = session.query(Role).filter(Role.name == "Admin").one_or_none()
if request.method == "GET":
if dash:
return json_success(json.dumps({"published": dash.published}))
else:
return json_error_response(
f"ERROR: cannot find dashboard {dashboard_id}", status=404
)
else:
edit_perm = is_owner(dash, g.user) or admin_role in get_user_roles()
if not edit_perm:
return json_error_response(
f'ERROR: "{g.user.username}" cannot alter dashboard "{dash.dashboard_title}"',
status=403,
)
dash.published = str(request.form["published"]).lower() == "true"
session.commit()
return json_success(json.dumps({"published": dash.published}))
@has_access
@expose("/dashboard/<dashboard_id>/")
def dashboard(self, dashboard_id):
"""Server side rendering for a dashboard"""
session = db.session()
qry = session.query(Dashboard)
if dashboard_id.isdigit():
qry = qry.filter_by(id=int(dashboard_id))
else:
qry = qry.filter_by(slug=dashboard_id)
dash = qry.one_or_none()
if not dash:
abort(404)
datasources = set()
for slc in dash.slices:
datasource = slc.datasource
if datasource:
datasources.add(datasource)
if config["ENABLE_ACCESS_REQUEST"]:
for datasource in datasources:
if datasource and not security_manager.datasource_access(datasource):
flash(
__(
security_manager.get_datasource_access_error_msg(datasource)
),
"danger",
)
return redirect(
"superset/request_access/?" f"dashboard_id={dash.id}&"
)
dash_edit_perm = check_ownership(
dash, raise_if_false=False
) and security_manager.can_access("can_save_dash", "Superset")
dash_save_perm = security_manager.can_access("can_save_dash", "Superset")
superset_can_explore = security_manager.can_access("can_explore", "Superset")
superset_can_csv = security_manager.can_access("can_csv", "Superset")
slice_can_edit = security_manager.can_access("can_edit", "SliceModelView")
standalone_mode = (
request.args.get(utils.ReservedUrlParameters.STANDALONE.value) == "true"
)
edit_mode = (
request.args.get(utils.ReservedUrlParameters.EDIT_MODE.value) == "true"
)
# Hack to log the dashboard_id properly, even when getting a slug
@event_logger.log_this
def dashboard(**kwargs):
pass
dashboard(
dashboard_id=dash.id,
dashboard_version="v2",
dash_edit_perm=dash_edit_perm,
edit_mode=edit_mode,
)
dashboard_data = dash.data
dashboard_data.update(
{
"standalone_mode": standalone_mode,
"dash_save_perm": dash_save_perm,
"dash_edit_perm": dash_edit_perm,
"superset_can_explore": superset_can_explore,
"superset_can_csv": superset_can_csv,
"slice_can_edit": slice_can_edit,
}
)
url_params = {
key: value
for key, value in request.args.items()
if key not in [param.value for param in utils.ReservedUrlParameters]
}
bootstrap_data = {
"user_id": g.user.get_id(),
"dashboard_data": dashboard_data,
"datasources": {ds.uid: ds.data for ds in datasources},
"common": common_bootstrap_payload(),
"editMode": edit_mode,
"urlParams": url_params,
}
if request.args.get("json") == "true":
return json_success(
json.dumps(bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser)
)
return self.render_template(
"superset/dashboard.html",
entry="dashboard",
standalone_mode=standalone_mode,
title=dash.dashboard_title,
bootstrap_data=json.dumps(
bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser
),
)
@api
@event_logger.log_this
@expose("/log/", methods=["POST"])
def log(self):
return Response(status=200)
@has_access
@expose("/sync_druid/", methods=["POST"])
@event_logger.log_this
def sync_druid_source(self):
"""Syncs the druid datasource in main db with the provided config.
The endpoint takes 3 arguments:
user - user name to perform the operation as
cluster - name of the druid cluster
config - configuration stored in json that contains:
name: druid datasource name
dimensions: list of the dimensions, they become druid columns
with the type STRING
metrics_spec: list of metrics (dictionary). Metric consists of
2 attributes: type and name. Type can be count,
etc. `count` type is stored internally as longSum
other fields will be ignored.
Example: {
'name': 'test_click',
'metrics_spec': [{'type': 'count', 'name': 'count'}],
'dimensions': ['affiliate_id', 'campaign', 'first_seen']
}
"""
payload = request.get_json(force=True)
druid_config = payload["config"]
user_name = payload["user"]
cluster_name = payload["cluster"]
user = security_manager.find_user(username=user_name)
DruidDatasource = ConnectorRegistry.sources["druid"]
DruidCluster = DruidDatasource.cluster_class
if not user:
err_msg = __(
"Can't find User '%(name)s', please ask your admin " "to create one.",
name=user_name,
)
logger.error(err_msg)
return json_error_response(err_msg)
cluster = (
db.session.query(DruidCluster)
.filter_by(cluster_name=cluster_name)
.one_or_none()
)
if not cluster:
err_msg = __(
"Can't find DruidCluster with cluster_name = " "'%(name)s'",
name=cluster_name,
)
logger.error(err_msg)
return json_error_response(err_msg)
try:
DruidDatasource.sync_to_db_from_config(druid_config, user, cluster)
except Exception as e:
logger.exception(utils.error_msg_from_exception(e))
return json_error_response(utils.error_msg_from_exception(e))
return Response(status=201)
@has_access
@expose("/sqllab_viz/", methods=["POST"])
@event_logger.log_this
def sqllab_viz(self):
SqlaTable = ConnectorRegistry.sources["table"]
data = json.loads(request.form.get("data"))
table_name = data.get("datasourceName")
database_id = data.get("dbId")
table = (
db.session.query(SqlaTable)
.filter_by(database_id=database_id, table_name=table_name)
.one_or_none()
)
if not table:
table = SqlaTable(table_name=table_name, owners=[g.user])
table.database_id = database_id
table.schema = data.get("schema")
table.template_params = data.get("templateParams")
table.is_sqllab_view = True
q = ParsedQuery(data.get("sql"))
table.sql = q.stripped()
db.session.add(table)
cols = []
for config in data.get("columns"):
column_name = config.get("name")
SqlaTable = ConnectorRegistry.sources["table"]
TableColumn = SqlaTable.column_class
SqlMetric = SqlaTable.metric_class
col = TableColumn(
column_name=column_name,
filterable=True,
groupby=True,
is_dttm=config.get("is_date", False),
type=config.get("type", False),
)
cols.append(col)
table.columns = cols
table.metrics = [SqlMetric(metric_name="count", expression="count(*)")]
db.session.commit()
return json_success(json.dumps({"table_id": table.id}))
@has_access
@expose("/extra_table_metadata/<database_id>/<table_name>/<schema>/")
@event_logger.log_this
def extra_table_metadata(self, database_id, table_name, schema):
schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)
table_name = utils.parse_js_uri_path_item(table_name)
mydb = db.session.query(models.Database).filter_by(id=database_id).one()
payload = mydb.db_engine_spec.extra_table_metadata(mydb, table_name, schema)
return json_success(json.dumps(payload))
@has_access
@expose("/select_star/<database_id>/<table_name>")
@expose("/select_star/<database_id>/<table_name>/<schema>")
@event_logger.log_this
def select_star(self, database_id, table_name, schema=None):
        logger.warning(
f"{self.__class__.__name__}.select_star "
"This API endpoint is deprecated and will be removed in version 1.0.0"
)
stats_logger.incr(f"{self.__class__.__name__}.select_star.init")
database = db.session.query(models.Database).get(database_id)
if not database:
stats_logger.incr(
f"deprecated.{self.__class__.__name__}.select_star.database_not_found"
)
return json_error_response("Not found", 404)
schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)
table_name = utils.parse_js_uri_path_item(table_name)
# Check that the user can access the datasource
if not self.appbuilder.sm.can_access_datasource(database, table_name, schema):
stats_logger.incr(
f"deprecated.{self.__class__.__name__}.select_star.permission_denied"
)
            logger.warning(
f"Permission denied for user {g.user} on table: {table_name} "
f"schema: {schema}"
)
return json_error_response("Not found", 404)
stats_logger.incr(f"deprecated.{self.__class__.__name__}.select_star.success")
return json_success(
database.select_star(
table_name, schema, latest_partition=True, show_cols=True
)
)
@has_access_api
@expose("/estimate_query_cost/<database_id>/", methods=["POST"])
@expose("/estimate_query_cost/<database_id>/<schema>/", methods=["POST"])
@event_logger.log_this
def estimate_query_cost(
self, database_id: int, schema: Optional[str] = None
) -> Response:
mydb = db.session.query(models.Database).get(database_id)
sql = json.loads(request.form.get("sql", '""'))
template_params = json.loads(request.form.get("templateParams") or "{}")
if template_params:
template_processor = get_template_processor(mydb)
sql = template_processor.process_template(sql, **template_params)
timeout = SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT
timeout_msg = f"The estimation exceeded the {timeout} seconds timeout."
try:
with utils.timeout(seconds=timeout, error_message=timeout_msg):
cost = mydb.db_engine_spec.estimate_query_cost(
mydb, schema, sql, utils.sources.get("sql_lab")
)
except SupersetTimeoutException as e:
logger.exception(e)
return json_error_response(timeout_msg)
except Exception as e:
return json_error_response(str(e))
spec = mydb.db_engine_spec
query_cost_formatters = get_feature_flags().get(
"QUERY_COST_FORMATTERS_BY_ENGINE", {}
)
query_cost_formatter = query_cost_formatters.get(
spec.engine, spec.query_cost_formatter
)
cost = query_cost_formatter(cost)
return json_success(json.dumps(cost))
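    # Illustrative form fields for estimate_query_cost (as parsed above): "sql" is a
    # JSON-encoded string and "templateParams" an optional JSON object, e.g.
    #
    #     sql='"SELECT * FROM my_table"'   templateParams='{"ds": "2020-01-01"}'
    #
    # The response is the engine-specific cost estimate passed through the configured
    # query-cost formatter, serialized as JSON. Example values are hypothetical.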
@expose("/theme/")
def theme(self):
return self.render_template("superset/theme.html")
@has_access_api
@expose("/results/<key>/")
@event_logger.log_this
def results(self, key):
return self.results_exec(key)
def results_exec(self, key: str):
"""Serves a key off of the results backend
It is possible to pass the `rows` query argument to limit the number
of rows returned.
"""
if not results_backend:
return json_error_response("Results backend isn't configured")
read_from_results_backend_start = now_as_float()
blob = results_backend.get(key)
stats_logger.timing(
"sqllab.query.results_backend_read",
now_as_float() - read_from_results_backend_start,
)
if not blob:
return json_error_response(
"Data could not be retrieved. " "You may want to re-run the query.",
status=410,
)
query = db.session.query(Query).filter_by(results_key=key).one_or_none()
if query is None:
return json_error_response(
"Data could not be retrieved. You may want to re-run the query.",
status=404,
)
rejected_tables = security_manager.rejected_tables(
query.sql, query.database, query.schema
)
if rejected_tables:
return json_error_response(
security_manager.get_table_access_error_msg(rejected_tables), status=403
)
payload = utils.zlib_decompress(blob, decode=not results_backend_use_msgpack)
obj: dict = _deserialize_results_payload(
payload, query, cast(bool, results_backend_use_msgpack)
)
if "rows" in request.args:
try:
rows = int(request.args["rows"])
except ValueError:
return json_error_response("Invalid `rows` argument", status=400)
obj = apply_display_max_row_limit(obj, rows)
return json_success(
json.dumps(obj, default=utils.json_iso_dttm_ser, ignore_nan=True)
)
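    # Illustrative usage: the optional `rows` query argument caps the number of rows
    # returned from the results backend, e.g.
    #
    #     GET /superset/results/<key>/?rows=100
    #
    # A non-integer value yields a 400; a missing blob yields 410 and a missing query
    # record yields 404, as handled above.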
@has_access_api
@expose("/stop_query/", methods=["POST"])
@event_logger.log_this
@backoff.on_exception(
backoff.constant,
Exception,
interval=1,
on_backoff=lambda details: db.session.rollback(),
on_giveup=lambda details: db.session.rollback(),
max_tries=5,
)
def stop_query(self):
client_id = request.form.get("client_id")
query = db.session.query(Query).filter_by(client_id=client_id).one()
if query.status in [
QueryStatus.FAILED,
QueryStatus.SUCCESS,
QueryStatus.TIMED_OUT,
]:
logger.error(
f"Query with client_id {client_id} could not be stopped: query already complete"
)
return self.json_response("OK")
query.status = QueryStatus.STOPPED
db.session.commit()
return self.json_response("OK")
@has_access_api
@expose("/validate_sql_json/", methods=["POST", "GET"])
@event_logger.log_this
def validate_sql_json(self):
"""Validates that arbitrary sql is acceptable for the given database.
Returns a list of error/warning annotations as json.
"""
sql = request.form.get("sql")
database_id = request.form.get("database_id")
schema = request.form.get("schema") or None
template_params = json.loads(request.form.get("templateParams") or "{}")
if len(template_params) > 0:
# TODO: factor the Database object out of template rendering
# or provide it as mydb so we can render template params
# without having to also persist a Query ORM object.
return json_error_response(
"SQL validation does not support template parameters", status=400
)
session = db.session()
mydb = session.query(models.Database).filter_by(id=database_id).one_or_none()
if not mydb:
return json_error_response(
"Database with id {} is missing.".format(database_id), status=400
)
spec = mydb.db_engine_spec
validators_by_engine = get_feature_flags().get("SQL_VALIDATORS_BY_ENGINE")
if not validators_by_engine or spec.engine not in validators_by_engine:
return json_error_response(
"no SQL validator is configured for {}".format(spec.engine), status=400
)
validator_name = validators_by_engine[spec.engine]
validator = get_validator_by_name(validator_name)
if not validator:
return json_error_response(
"No validator named {} found (configured for the {} engine)".format(
validator_name, spec.engine
)
)
try:
timeout = config["SQLLAB_VALIDATION_TIMEOUT"]
timeout_msg = f"The query exceeded the {timeout} seconds timeout."
with utils.timeout(seconds=timeout, error_message=timeout_msg):
errors = validator.validate(sql, schema, mydb)
payload = json.dumps(
[err.to_dict() for err in errors],
default=utils.pessimistic_json_iso_dttm_ser,
ignore_nan=True,
encoding=None,
)
return json_success(payload)
except Exception as e:
logger.exception(e)
msg = _(
f"{validator.name} was unable to check your query.\n"
"Please recheck your query.\n"
f"Exception: {e}"
)
# Return as a 400 if the database error message says we got a 4xx error
if re.search(r"([\W]|^)4\d{2}([\W]|$)", str(e)):
return json_error_response(f"{msg}", status=400)
else:
return json_error_response(f"{msg}")
def _sql_json_async(
self,
session: Session,
rendered_query: str,
query: Query,
expand_data: bool,
log_params: Optional[Dict[str, Any]] = None,
) -> str:
"""
Send SQL JSON query to celery workers
:param session: SQLAlchemy session object
:param rendered_query: the rendered query to perform by workers
:param query: The query (SQLAlchemy) object
:return: String JSON response
"""
logger.info(f"Query {query.id}: Running query on a Celery worker")
        # Don't wait on the Celery future object; otherwise the request may time out.
try:
sql_lab.get_sql_results.delay(
query.id,
rendered_query,
return_results=False,
store_results=not query.select_as_cta,
user_name=g.user.username if g.user else None,
start_time=now_as_float(),
expand_data=expand_data,
log_params=log_params,
)
except Exception as e:
logger.exception(f"Query {query.id}: {e}")
msg = _(
"Failed to start remote query on a worker. "
"Tell your administrator to verify the availability of "
"the message queue."
)
query.status = QueryStatus.FAILED
query.error_message = msg
session.commit()
return json_error_response("{}".format(msg))
resp = json_success(
json.dumps(
{"query": query.to_dict()},
default=utils.json_int_dttm_ser,
ignore_nan=True,
),
status=202,
)
session.commit()
return resp
def _sql_json_sync(
self,
session: Session,
rendered_query: str,
query: Query,
expand_data: bool,
log_params: Optional[Dict[str, Any]] = None,
) -> str:
"""
Execute SQL query (sql json)
:param rendered_query: The rendered query (included templates)
:param query: The query SQL (SQLAlchemy) object
:return: String JSON response
"""
try:
timeout = config["SQLLAB_TIMEOUT"]
timeout_msg = f"The query exceeded the {timeout} seconds timeout."
store_results = (
is_feature_enabled("SQLLAB_BACKEND_PERSISTENCE")
and not query.select_as_cta
)
with utils.timeout(seconds=timeout, error_message=timeout_msg):
# pylint: disable=no-value-for-parameter
data = sql_lab.get_sql_results(
query.id,
rendered_query,
return_results=True,
store_results=store_results,
user_name=g.user.username if g.user else None,
expand_data=expand_data,
log_params=log_params,
)
payload = json.dumps(
apply_display_max_row_limit(data),
default=utils.pessimistic_json_iso_dttm_ser,
ignore_nan=True,
encoding=None,
)
except Exception as e:
logger.exception(f"Query {query.id}: {e}")
return json_error_response(f"{{e}}")
if data.get("status") == QueryStatus.FAILED:
return json_error_response(payload=data)
return json_success(payload)
@has_access_api
@expose("/sql_json/", methods=["POST"])
@event_logger.log_this
def sql_json(self):
log_params = {
"user_agent": cast(Optional[str], request.headers.get("USER_AGENT"))
}
return self.sql_json_exec(request.json, log_params)
def sql_json_exec(
self, query_params: dict, log_params: Optional[Dict[str, Any]] = None
):
"""Runs arbitrary sql and returns data as json"""
# Collect Values
database_id: int = cast(int, query_params.get("database_id"))
schema: str = cast(str, query_params.get("schema"))
sql: str = cast(str, query_params.get("sql"))
try:
template_params: dict = json.loads(
query_params.get("templateParams") or "{}"
)
except json.JSONDecodeError:
logger.warning(
f"Invalid template parameter {query_params.get('templateParams')}"
" specified. Defaulting to empty dict"
)
template_params = {}
limit: int = query_params.get("queryLimit") or app.config["SQL_MAX_ROW"]
async_flag: bool = cast(bool, query_params.get("runAsync"))
if limit < 0:
logger.warning(
f"Invalid limit of {limit} specified. Defaulting to max limit."
)
limit = 0
select_as_cta: bool = cast(bool, query_params.get("select_as_cta"))
tmp_table_name: str = cast(str, query_params.get("tmp_table_name"))
client_id: str = cast(
str, query_params.get("client_id") or utils.shortid()[:10]
)
sql_editor_id: str = cast(str, query_params.get("sql_editor_id"))
tab_name: str = cast(str, query_params.get("tab"))
status: str = QueryStatus.PENDING if async_flag else QueryStatus.RUNNING
session = db.session()
mydb = session.query(models.Database).get(database_id)
if not mydb:
return json_error_response(f"Database with id {database_id} is missing.")
# Set tmp_table_name for CTA
if select_as_cta and mydb.force_ctas_schema:
tmp_table_name = f"{mydb.force_ctas_schema}.{tmp_table_name}"
# Save current query
query = Query(
database_id=database_id,
sql=sql,
schema=schema,
select_as_cta=select_as_cta,
start_time=now_as_float(),
tab_name=tab_name,
status=status,
sql_editor_id=sql_editor_id,
tmp_table_name=tmp_table_name,
user_id=g.user.get_id() if g.user else None,
client_id=client_id,
)
try:
session.add(query)
session.flush()
query_id = query.id
session.commit() # shouldn't be necessary
except SQLAlchemyError as e:
logger.error(f"Errors saving query details {e}")
session.rollback()
raise Exception(_("Query record was not created as expected."))
if not query_id:
raise Exception(_("Query record was not created as expected."))
logger.info(f"Triggering query_id: {query_id}")
rejected_tables = security_manager.rejected_tables(sql, mydb, schema)
if rejected_tables:
query.status = QueryStatus.FAILED
session.commit()
return json_error_response(
security_manager.get_table_access_error_msg(rejected_tables),
link=security_manager.get_table_access_link(rejected_tables),
status=403,
)
try:
template_processor = get_template_processor(
database=query.database, query=query
)
rendered_query = template_processor.process_template(
query.sql, **template_params
)
except Exception as e:
error_msg = utils.error_msg_from_exception(e)
return json_error_response(
f"Query {query_id}: Template rendering failed: {error_msg}"
)
# set LIMIT after template processing
limits = [mydb.db_engine_spec.get_limit_from_sql(rendered_query), limit]
query.limit = min(lim for lim in limits if lim is not None)
# Flag for whether or not to expand data
# (feature that will expand Presto row objects and arrays)
expand_data: bool = cast(
bool,
is_feature_enabled("PRESTO_EXPAND_DATA")
and query_params.get("expand_data"),
)
# Async request.
if async_flag:
return self._sql_json_async(
session, rendered_query, query, expand_data, log_params
)
# Sync request.
return self._sql_json_sync(
session, rendered_query, query, expand_data, log_params
)
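    # Illustrative `query_params` payload for sql_json_exec (keys taken from the
    # lookups above; values are hypothetical):
    #
    #     {
    #         "database_id": 1,
    #         "schema": "public",
    #         "sql": "SELECT 1",
    #         "templateParams": "{}",
    #         "queryLimit": 1000,
    #         "runAsync": false,
    #         "select_as_cta": false,
    #         "tmp_table_name": "",
    #         "client_id": "abc123xyz0",
    #         "sql_editor_id": "1",
    #         "tab": "Untitled Query",
    #         "expand_data": false
    #     }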
@has_access
@expose("/csv/<client_id>")
@event_logger.log_this
def csv(self, client_id):
"""Download the query results as csv."""
logger.info("Exporting CSV file [{}]".format(client_id))
query = db.session.query(Query).filter_by(client_id=client_id).one()
rejected_tables = security_manager.rejected_tables(
query.sql, query.database, query.schema
)
if rejected_tables:
flash(security_manager.get_table_access_error_msg(rejected_tables))
return redirect("/")
blob = None
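        # Prefer cached results from the results backend; if they are missing
        # or expired, fall back to re-running the stored SELECT (or original
        # SQL) to produce the CSV.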
if results_backend and query.results_key:
logger.info(
"Fetching CSV from results backend " "[{}]".format(query.results_key)
)
blob = results_backend.get(query.results_key)
if blob:
logger.info("Decompressing")
payload = utils.zlib_decompress(
blob, decode=not results_backend_use_msgpack
)
obj = _deserialize_results_payload(
payload, query, results_backend_use_msgpack
)
columns = [c["name"] for c in obj["columns"]]
df = pd.DataFrame.from_records(obj["data"], columns=columns)
logger.info("Using pandas to convert to CSV")
csv = df.to_csv(index=False, **config["CSV_EXPORT"])
else:
logger.info("Running a query to turn into CSV")
sql = query.select_sql or query.executed_sql
df = query.database.get_df(sql, query.schema)
# TODO(bkyryliuk): add compression=gzip for big files.
csv = df.to_csv(index=False, **config["CSV_EXPORT"])
response = Response(csv, mimetype="text/csv")
response.headers[
"Content-Disposition"
] = f"attachment; filename={query.name}.csv"
event_info = {
"event_type": "data_export",
"client_id": client_id,
"row_count": len(df.index),
"database": query.database.name,
"schema": query.schema,
"sql": query.sql,
"exported_format": "csv",
}
logger.info(
f"CSV exported: {repr(event_info)}", extra={"superset_event": event_info}
)
return response
@api
@handle_api_exception
@has_access
@expose("/fetch_datasource_metadata")
@event_logger.log_this
def fetch_datasource_metadata(self):
datasource_id, datasource_type = request.args.get("datasourceKey").split("__")
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session
)
# Check if datasource exists
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
# Check permission for datasource
security_manager.assert_datasource_permission(datasource)
return json_success(json.dumps(datasource.data))
@has_access_api
@expose("/queries/<last_updated_ms>")
def queries(self, last_updated_ms):
"""
Get the updated queries.
:param last_updated_ms: unix time, milliseconds
"""
last_updated_ms_int = int(float(last_updated_ms)) if last_updated_ms else 0
return self.queries_exec(last_updated_ms_int)
def queries_exec(self, last_updated_ms_int: int):
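        # Return the current user's queries changed on or after the given
        # timestamp, keyed by client_id.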
stats_logger.incr("queries")
if not g.user.get_id():
return json_error_response(
"Please login to access the queries.", status=403
)
# UTC date time, same that is stored in the DB.
last_updated_dt = utils.EPOCH + timedelta(seconds=last_updated_ms_int / 1000)
sql_queries = (
db.session.query(Query)
.filter(
Query.user_id == g.user.get_id(), Query.changed_on >= last_updated_dt
)
.all()
)
dict_queries = {q.client_id: q.to_dict() for q in sql_queries}
return json_success(json.dumps(dict_queries, default=utils.json_int_dttm_ser))
@has_access
@expose("/search_queries")
@event_logger.log_this
def search_queries(self) -> Response:
"""
Search for previously run sqllab queries. Used for Sqllab Query Search
page /superset/sqllab#search.
        The custom permission can_only_search_queries_owned restricts results
        to queries run by the current user.
:returns: Response with list of sql query dicts
"""
query = db.session.query(Query)
if security_manager.can_access_all_queries():
search_user_id = request.args.get("user_id")
elif (
request.args.get("user_id") is not None
and request.args.get("user_id") != g.user.get_user_id()
):
return Response(status=403, mimetype="application/json")
else:
search_user_id = g.user.get_user_id()
database_id = request.args.get("database_id")
search_text = request.args.get("search_text")
status = request.args.get("status")
# From and To time stamp should be Epoch timestamp in seconds
from_time = request.args.get("from")
to_time = request.args.get("to")
if search_user_id:
# Filter on user_id
query = query.filter(Query.user_id == search_user_id)
if database_id:
# Filter on db Id
query = query.filter(Query.database_id == database_id)
if status:
# Filter on status
query = query.filter(Query.status == status)
if search_text:
# Filter on search text
query = query.filter(Query.sql.like("%{}%".format(search_text)))
if from_time:
query = query.filter(Query.start_time > int(from_time))
if to_time:
query = query.filter(Query.start_time < int(to_time))
query_limit = config["QUERY_SEARCH_LIMIT"]
sql_queries = query.order_by(Query.start_time.asc()).limit(query_limit).all()
dict_queries = [q.to_dict() for q in sql_queries]
return Response(
json.dumps(dict_queries, default=utils.json_int_dttm_ser),
status=200,
mimetype="application/json",
)
@app.errorhandler(500)
def show_traceback(self):
return (
render_template("superset/traceback.html", error_msg=get_error_msg()),
500,
)
@expose("/welcome")
def welcome(self):
"""Personalized welcome page"""
if not g.user or not g.user.get_id():
return redirect(appbuilder.get_url_for_login)
welcome_dashboard_id = (
db.session.query(UserAttribute.welcome_dashboard_id)
.filter_by(user_id=g.user.get_id())
.scalar()
)
if welcome_dashboard_id:
return self.dashboard(str(welcome_dashboard_id))
payload = {
"user": bootstrap_user_data(g.user),
"common": common_bootstrap_payload(),
}
return self.render_template(
"superset/welcome.html",
entry="welcome",
bootstrap_data=json.dumps(
payload, default=utils.pessimistic_json_iso_dttm_ser
),
)
@has_access
@expose("/profile/<username>/")
def profile(self, username):
"""User profile page"""
if not username and g.user:
username = g.user.username
user = (
db.session.query(ab_models.User).filter_by(username=username).one_or_none()
)
if not user:
abort(404, description=f"User: {username} does not exist.")
payload = {
"user": bootstrap_user_data(user, include_perms=True),
"common": common_bootstrap_payload(),
}
return self.render_template(
"superset/basic.html",
title=_("%(user)s's profile", user=username),
entry="profile",
bootstrap_data=json.dumps(
payload, default=utils.pessimistic_json_iso_dttm_ser
),
)
@staticmethod
def _get_sqllab_payload(user_id: int) -> Dict[str, Any]:
# send list of tab state ids
tabs_state = (
db.session.query(TabState.id, TabState.label)
.filter_by(user_id=user_id)
.all()
)
tab_state_ids = [tab_state[0] for tab_state in tabs_state]
# return first active tab, or fallback to another one if no tab is active
active_tab = (
db.session.query(TabState)
.filter_by(user_id=user_id)
.order_by(TabState.active.desc())
.first()
)
databases: Dict[int, Any] = {}
queries: Dict[str, Any] = {}
# These are unnecessary if sqllab backend persistence is disabled
if is_feature_enabled("SQLLAB_BACKEND_PERSISTENCE"):
databases = {
database.id: {
k: v for k, v in database.to_json().items() if k in DATABASE_KEYS
}
for database in db.session.query(models.Database).all()
}
# return all user queries associated with existing SQL editors
user_queries = (
db.session.query(Query)
.filter_by(user_id=user_id)
.filter(Query.sql_editor_id.cast(Integer).in_(tab_state_ids))
.all()
)
queries = {
query.client_id: {k: v for k, v in query.to_dict().items()}
for query in user_queries
}
return {
"defaultDbId": config["SQLLAB_DEFAULT_DBID"],
"common": common_bootstrap_payload(),
"tab_state_ids": tabs_state,
"active_tab": active_tab.to_dict() if active_tab else None,
"databases": databases,
"queries": queries,
}
@has_access
@expose("/sqllab")
def sqllab(self):
"""SQL Editor"""
payload = self._get_sqllab_payload(g.user.get_id())
bootstrap_data = json.dumps(
payload, default=utils.pessimistic_json_iso_dttm_ser
)
return self.render_template(
"superset/basic.html", entry="sqllab", bootstrap_data=bootstrap_data
)
@api
@handle_api_exception
@has_access_api
@expose("/slice_query/<slice_id>/")
def slice_query(self, slice_id):
"""
This method exposes an API endpoint to
get the database query string for this slice
"""
viz_obj = get_viz(slice_id)
security_manager.assert_viz_permission(viz_obj)
return self.get_query_string_response(viz_obj)
@api
@has_access_api
@expose("/schemas_access_for_csv_upload")
def schemas_access_for_csv_upload(self):
"""
This method exposes an API endpoint to
get the schema access control settings for csv upload in this database
"""
if not request.args.get("db_id"):
return json_error_response("No database is allowed for your csv upload")
db_id = int(request.args.get("db_id"))
database = db.session.query(models.Database).filter_by(id=db_id).one()
try:
schemas_allowed = database.get_schema_access_for_csv_upload()
if (
security_manager.database_access(database)
or security_manager.all_datasource_access()
):
return self.json_response(schemas_allowed)
# the list schemas_allowed should not be empty here
# and the list schemas_allowed_processed returned from security_manager
# should not be empty either,
# otherwise the database should have been filtered out
# in CsvToDatabaseForm
schemas_allowed_processed = security_manager.schemas_accessible_by_user(
database, schemas_allowed, False
)
return self.json_response(schemas_allowed_processed)
except Exception as e:
logger.exception(e)
return json_error_response(
"Failed to fetch schemas allowed for csv upload in this database! "
"Please contact your Superset Admin!"
)
class CssTemplateModelView(SupersetModelView, DeleteMixin):
datamodel = SQLAInterface(models.CssTemplate)
include_route_methods = RouteMethod.CRUD_SET
list_title = _("CSS Templates")
show_title = _("Show CSS Template")
add_title = _("Add CSS Template")
edit_title = _("Edit CSS Template")
list_columns = ["template_name"]
edit_columns = ["template_name", "css"]
add_columns = edit_columns
label_columns = {"template_name": _("Template Name")}
class CssTemplateAsyncModelView(CssTemplateModelView):
include_route_methods = {RouteMethod.API_READ}
list_columns = ["template_name", "css"]
@app.after_request
def apply_http_headers(response: Response):
"""Applies the configuration's http headers to all responses"""
# HTTP_HEADERS is deprecated, this provides backwards compatibility
response.headers.extend(
{**config["OVERRIDE_HTTP_HEADERS"], **config["HTTP_HEADERS"]}
)
for k, v in config["DEFAULT_HTTP_HEADERS"].items():
if k not in response.headers:
response.headers[k] = v
return response
import logging
import re
from contextlib import closing
from datetime import datetime, timedelta
from typing import Any, cast, Dict, List, Optional, Union
from urllib import parse
import backoff
import msgpack
import pandas as pd
import pyarrow as pa
import simplejson as json
from flask import abort, flash, g, Markup, redirect, render_template, request, Response
from flask_appbuilder import expose
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import has_access, has_access_api
from flask_appbuilder.security.sqla import models as ab_models
from flask_babel import gettext as __, lazy_gettext as _
from sqlalchemy import and_, Integer, or_, select
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm.session import Session
from werkzeug.urls import Href
import superset.models.core as models
from superset import (
app,
appbuilder,
cache,
conf,
dataframe,
db,
event_logger,
get_feature_flags,
is_feature_enabled,
result_set,
results_backend,
results_backend_use_msgpack,
security_manager,
sql_lab,
talisman,
viz,
)
from superset.connectors.connector_registry import ConnectorRegistry
from superset.connectors.sqla.models import AnnotationDatasource
from superset.constants import RouteMethod
from superset.exceptions import (
DatabaseNotFound,
SupersetException,
SupersetSecurityException,
SupersetTimeoutException,
)
from superset.jinja_context import get_template_processor
from superset.models.dashboard import Dashboard
from superset.models.datasource_access_request import DatasourceAccessRequest
from superset.models.slice import Slice
from superset.models.sql_lab import Query, TabState
from superset.models.user_attributes import UserAttribute
from superset.sql_parse import ParsedQuery
from superset.sql_validators import get_validator_by_name
from superset.utils import core as utils, dashboard_import_export
from superset.utils.dates import now_as_float
from superset.utils.decorators import etag_cache, stats_timing
from superset.views.database.filters import DatabaseFilter
from .base import (
api,
BaseSupersetView,
check_ownership,
common_bootstrap_payload,
CsvResponse,
data_payload_response,
DeleteMixin,
generate_download_headers,
get_error_msg,
get_user_roles,
handle_api_exception,
json_error_response,
json_success,
SupersetModelView,
)
from .utils import (
apply_display_max_row_limit,
bootstrap_user_data,
get_datasource_info,
get_form_data,
get_viz,
)
config = app.config
CACHE_DEFAULT_TIMEOUT = config["CACHE_DEFAULT_TIMEOUT"]
SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT = config["SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT"]
stats_logger = config["STATS_LOGGER"]
DAR = DatasourceAccessRequest
QueryStatus = utils.QueryStatus
logger = logging.getLogger(__name__)
DATABASE_KEYS = [
"allow_csv_upload",
"allow_ctas",
"allow_dml",
"allow_multi_schema_metadata_fetch",
"allow_run_async",
"allows_subquery",
"backend",
"database_name",
"expose_in_sqllab",
"force_ctas_schema",
"id",
]
ALL_DATASOURCE_ACCESS_ERR = __(
"This endpoint requires the `all_datasource_access` permission"
)
DATASOURCE_MISSING_ERR = __("The data source seems to have been deleted")
ACCESS_REQUEST_MISSING_ERR = __("The access requests seem to have been deleted")
USER_MISSING_ERR = __("The user seems to have been deleted")
FORM_DATA_KEY_BLACKLIST: List[str] = []
if not config["ENABLE_JAVASCRIPT_CONTROLS"]:
FORM_DATA_KEY_BLACKLIST = ["js_tooltip", "js_onclick_href", "js_data_mutator"]
def get_database_access_error_msg(database_name):
return __(
"This view requires the database %(name)s or "
"`all_datasource_access` permission",
name=database_name,
)
def is_owner(obj, user):
return obj and user in obj.owners
def check_datasource_perms(
self, datasource_type: Optional[str] = None, datasource_id: Optional[int] = None
) -> None:
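    # Permission hook used by @etag_cache on explore endpoints: resolve the
    # datasource from the request's form_data and raise if the current user
    # may not access the corresponding viz.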
form_data = get_form_data()[0]
try:
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data
)
except SupersetException as e:
raise SupersetSecurityException(str(e))
viz_obj = get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=False,
)
security_manager.assert_viz_permission(viz_obj)
def check_slice_perms(self, slice_id):
form_data, slc = get_form_data(slice_id, use_slice_data=True)
viz_obj = get_viz(
datasource_type=slc.datasource.type,
datasource_id=slc.datasource.id,
form_data=form_data,
force=False,
)
security_manager.assert_viz_permission(viz_obj)
def _deserialize_results_payload(
payload: Union[bytes, str], query, use_msgpack: Optional[bool] = False
) -> dict:
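    # Results are stored either as msgpack-encoded PyArrow tables or as plain
    # JSON, depending on results_backend_use_msgpack; normalize both formats
    # into the same dict payload, expanding nested columns via the db engine
    # spec in the msgpack case.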
logger.debug(f"Deserializing from msgpack: {use_msgpack}")
if use_msgpack:
with stats_timing(
"sqllab.query.results_backend_msgpack_deserialize", stats_logger
):
ds_payload = msgpack.loads(payload, raw=False)
with stats_timing("sqllab.query.results_backend_pa_deserialize", stats_logger):
pa_table = pa.deserialize(ds_payload["data"])
df = result_set.SupersetResultSet.convert_table_to_df(pa_table)
ds_payload["data"] = dataframe.df_to_records(df) or []
db_engine_spec = query.database.db_engine_spec
all_columns, data, expanded_columns = db_engine_spec.expand_data(
ds_payload["selected_columns"], ds_payload["data"]
)
ds_payload.update(
{"data": data, "columns": all_columns, "expanded_columns": expanded_columns}
)
return ds_payload
else:
with stats_timing(
"sqllab.query.results_backend_json_deserialize", stats_logger
):
return json.loads(payload)
class AccessRequestsModelView(SupersetModelView, DeleteMixin):
datamodel = SQLAInterface(DAR)
include_route_methods = RouteMethod.CRUD_SET
list_columns = [
"username",
"user_roles",
"datasource_link",
"roles_with_datasource",
"created_on",
]
order_columns = ["created_on"]
base_order = ("changed_on", "desc")
label_columns = {
"username": _("User"),
"user_roles": _("User Roles"),
"database": _("Database URL"),
"datasource_link": _("Datasource"),
"roles_with_datasource": _("Roles to grant"),
"created_on": _("Created On"),
}
@talisman(force_https=False)
@app.route("/health")
def health():
return "OK"
@talisman(force_https=False)
@app.route("/healthcheck")
def healthcheck():
return "OK"
@talisman(force_https=False)
@app.route("/ping")
def ping():
return "OK"
class KV(BaseSupersetView):
@event_logger.log_this
@has_access_api
@expose("/store/", methods=["POST"])
def store(self):
try:
value = request.form.get("data")
obj = models.KeyValue(value=value)
db.session.add(obj)
db.session.commit()
except Exception as e:
return json_error_response(e)
return Response(json.dumps({"id": obj.id}), status=200)
@event_logger.log_this
@has_access_api
@expose("/<key_id>/", methods=["GET"])
def get_value(self, key_id):
try:
kv = db.session.query(models.KeyValue).filter_by(id=key_id).scalar()
if not kv:
return Response(status=404, content_type="text/plain")
except Exception as e:
return json_error_response(e)
return Response(kv.value, status=200, content_type="text/plain")
class R(BaseSupersetView):
@event_logger.log_this
@expose("/<url_id>")
def index(self, url_id):
url = db.session.query(models.Url).get(url_id)
if url and url.url:
explore_url = "//superset/explore/?"
if url.url.startswith(explore_url):
explore_url += f"r={url_id}"
return redirect(explore_url[1:])
else:
return redirect(url.url[1:])
else:
flash("URL to nowhere...", "danger")
return redirect("/")
@event_logger.log_this
@has_access_api
@expose("/shortner/", methods=["POST"])
def shortner(self):
url = request.form.get("data")
obj = models.Url(url=url)
db.session.add(obj)
db.session.commit()
return Response(
"{scheme}://{request.headers[Host]}/r/{obj.id}".format(
scheme=request.scheme, request=request, obj=obj
),
mimetype="text/plain",
)
class Superset(BaseSupersetView):
logger = logging.getLogger(__name__)
@has_access_api
@expose("/datasources/")
def datasources(self):
datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [o.short_data for o in datasources if o.short_data.get("name")]
datasources = sorted(datasources, key=lambda o: o["name"])
return self.json_response(datasources)
@has_access_api
@expose("/override_role_permissions/", methods=["POST"])
def override_role_permissions(self):
data = request.get_json(force=True)
role_name = data["role_name"]
databases = data["database"]
db_ds_names = set()
for dbs in databases:
for schema in dbs["schema"]:
for ds_name in schema["datasources"]:
fullname = utils.get_datasource_full_name(
dbs["name"], ds_name, schema=schema["name"]
)
db_ds_names.add(fullname)
existing_datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [d for d in existing_datasources if d.full_name in db_ds_names]
role = security_manager.find_role(role_name)
role.permissions = []
granted_perms = []
for datasource in datasources:
view_menu_perm = security_manager.find_permission_view_menu(
view_menu_name=datasource.perm, permission_name="datasource_access"
)
if view_menu_perm and view_menu_perm.view_menu:
role.permissions.append(view_menu_perm)
granted_perms.append(view_menu_perm.view_menu.name)
db.session.commit()
return self.json_response(
{"granted": granted_perms, "requested": list(db_ds_names)}, status=201
)
@event_logger.log_this
@has_access
@expose("/request_access/")
def request_access(self):
datasources = set()
dashboard_id = request.args.get("dashboard_id")
if dashboard_id:
dash = db.session.query(Dashboard).filter_by(id=int(dashboard_id)).one()
datasources |= dash.datasources
datasource_id = request.args.get("datasource_id")
datasource_type = request.args.get("datasource_type")
if datasource_id:
ds_class = ConnectorRegistry.sources.get(datasource_type)
datasource = (
db.session.query(ds_class).filter_by(id=int(datasource_id)).one()
)
datasources.add(datasource)
has_access = all(
(
datasource and security_manager.datasource_access(datasource)
for datasource in datasources
)
)
if has_access:
return redirect("/superset/dashboard/{}".format(dashboard_id))
if request.args.get("action") == "go":
for datasource in datasources:
access_request = DAR(
datasource_id=datasource.id, datasource_type=datasource.type
)
db.session.add(access_request)
db.session.commit()
flash(__("Access was requested"), "info")
return redirect("/")
return self.render_template(
"superset/request_access.html",
datasources=datasources,
datasource_names=", ".join([o.name for o in datasources]),
)
@event_logger.log_this
@has_access
@expose("/approve")
def approve(self):
def clean_fulfilled_requests(session):
for r in session.query(DAR).all():
datasource = ConnectorRegistry.get_datasource(
r.datasource_type, r.datasource_id, session
)
if not datasource or security_manager.datasource_access(datasource):
session.delete(r)
session.commit()
datasource_type = request.args.get("datasource_type")
datasource_id = request.args.get("datasource_id")
created_by_username = request.args.get("created_by")
role_to_grant = request.args.get("role_to_grant")
role_to_extend = request.args.get("role_to_extend")
session = db.session
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, session
)
if not datasource:
flash(DATASOURCE_MISSING_ERR, "alert")
return json_error_response(DATASOURCE_MISSING_ERR)
requested_by = security_manager.find_user(username=created_by_username)
if not requested_by:
flash(USER_MISSING_ERR, "alert")
return json_error_response(USER_MISSING_ERR)
requests = (
session.query(DAR)
.filter(
DAR.datasource_id == datasource_id,
DAR.datasource_type == datasource_type,
DAR.created_by_fk == requested_by.id,
)
.all()
)
if not requests:
flash(ACCESS_REQUEST_MISSING_ERR, "alert")
return json_error_response(ACCESS_REQUEST_MISSING_ERR)
if security_manager.all_datasource_access() or check_ownership(
datasource, raise_if_false=False
):
if role_to_grant:
role = security_manager.find_role(role_to_grant)
requested_by.roles.append(role)
msg = __(
"%(user)s was granted the role %(role)s that gives access "
"to the %(datasource)s",
user=requested_by.username,
role=role_to_grant,
datasource=datasource.full_name,
)
utils.notify_user_about_perm_udate(
g.user,
requested_by,
role,
datasource,
"email/role_granted.txt",
app.config,
)
flash(msg, "info")
if role_to_extend:
perm_view = security_manager.find_permission_view_menu(
"email/datasource_access", datasource.perm
)
role = security_manager.find_role(role_to_extend)
security_manager.add_permission_role(role, perm_view)
msg = __(
"Role %(r)s was extended to provide the access to "
"the datasource %(ds)s",
r=role_to_extend,
ds=datasource.full_name,
)
utils.notify_user_about_perm_udate(
g.user,
requested_by,
role,
datasource,
"email/role_extended.txt",
app.config,
)
flash(msg, "info")
clean_fulfilled_requests(session)
else:
flash(__("You have no permission to approve this request"), "danger")
return redirect("/accessrequestsmodelview/list/")
for r in requests:
session.delete(r)
session.commit()
return redirect("/accessrequestsmodelview/list/")
def get_viz(
self,
slice_id=None,
form_data=None,
datasource_type=None,
datasource_id=None,
force=False,
):
if slice_id:
slc = db.session.query(Slice).filter_by(id=slice_id).one()
return slc.get_viz()
else:
viz_type = form_data.get("viz_type", "table")
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session
)
viz_obj = viz.viz_types[viz_type](
datasource, form_data=form_data, force=force
)
return viz_obj
@has_access
@expose("/slice/<slice_id>/")
def slice(self, slice_id):
form_data, slc = get_form_data(slice_id, use_slice_data=True)
if not slc:
abort(404)
endpoint = "/superset/explore/?form_data={}".format(
parse.quote(json.dumps({"slice_id": slice_id}))
)
param = utils.ReservedUrlParameters.STANDALONE.value
if request.args.get(param) == "true":
endpoint += f"&{param}=true"
return redirect(endpoint)
def get_query_string_response(self, viz_obj):
query = None
try:
query_obj = viz_obj.query_obj()
if query_obj:
query = viz_obj.datasource.get_query_str(query_obj)
except Exception as e:
logger.exception(e)
return json_error_response(e)
if not query:
query = "No query."
return self.json_response(
{"query": query, "language": viz_obj.datasource.query_language}
)
def get_raw_results(self, viz_obj):
return self.json_response(
{"data": viz_obj.get_df_payload()["df"].to_dict("records")}
)
def get_samples(self, viz_obj):
return self.json_response({"data": viz_obj.get_samples()})
def generate_json(
self, viz_obj, csv=False, query=False, results=False, samples=False
):
if csv:
return CsvResponse(
viz_obj.get_csv(),
status=200,
headers=generate_download_headers("csv"),
mimetype="application/csv",
)
if query:
return self.get_query_string_response(viz_obj)
if results:
return self.get_raw_results(viz_obj)
if samples:
return self.get_samples(viz_obj)
payload = viz_obj.get_payload()
return data_payload_response(*viz_obj.payload_json_and_has_error(payload))
@event_logger.log_this
@api
@has_access_api
@expose("/slice_json/<slice_id>")
@etag_cache(CACHE_DEFAULT_TIMEOUT, check_perms=check_slice_perms)
def slice_json(self, slice_id):
form_data, slc = get_form_data(slice_id, use_slice_data=True)
datasource_type = slc.datasource.type
datasource_id = slc.datasource.id
viz_obj = get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=False,
)
return self.generate_json(viz_obj)
@event_logger.log_this
@api
@has_access_api
@expose("/annotation_json/<layer_id>")
def annotation_json(self, layer_id):
form_data = get_form_data()[0]
form_data["layer_id"] = layer_id
form_data["filters"] = [{"col": "layer_id", "op": "==", "val": layer_id}]
datasource = AnnotationDatasource()
viz_obj = viz.viz_types["table"](datasource, form_data=form_data, force=False)
payload = viz_obj.get_payload()
return data_payload_response(*viz_obj.payload_json_and_has_error(payload))
EXPLORE_JSON_METHODS = ["POST"]
if not is_feature_enabled("ENABLE_EXPLORE_JSON_CSRF_PROTECTION"):
EXPLORE_JSON_METHODS.append("GET")
@event_logger.log_this
@api
@has_access_api
@handle_api_exception
@expose(
"/explore_json/<datasource_type>/<datasource_id>/", methods=EXPLORE_JSON_METHODS
)
@expose("/explore_json/", methods=EXPLORE_JSON_METHODS)
@etag_cache(CACHE_DEFAULT_TIMEOUT, check_perms=check_datasource_perms)
def explore_json(self, datasource_type=None, datasource_id=None):
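        # One endpoint, several payload shapes: query-string flags select a
        # CSV download, the generated query string, raw results, data samples,
        # or (by default) the full viz payload.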
csv = request.args.get("csv") == "true"
query = request.args.get("query") == "true"
results = request.args.get("results") == "true"
samples = request.args.get("samples") == "true"
force = request.args.get("force") == "true"
form_data = get_form_data()[0]
try:
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data
)
except SupersetException as e:
return json_error_response(utils.error_msg_from_exception(e))
viz_obj = get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=force,
)
return self.generate_json(
viz_obj, csv=csv, query=query, results=results, samples=samples
)
@event_logger.log_this
@has_access
@expose("/import_dashboards", methods=["GET", "POST"])
def import_dashboards(self):
f = request.files.get("file")
if request.method == "POST" and f:
try:
dashboard_import_export.import_dashboards(db.session, f.stream)
except DatabaseNotFound as e:
flash(
_(
"Cannot import dashboard: %(db_error)s.\n"
"Make sure to create the database before "
"importing the dashboard.",
db_error=e,
),
"danger",
)
except Exception as e:
logger.exception(e)
flash(
_(
"An unknown error occurred. "
"Please contact your Superset administrator"
),
"danger",
)
return redirect("/dashboard/list/")
return self.render_template("superset/import_dashboards.html")
@event_logger.log_this
@has_access
@expose("/explore/<datasource_type>/<datasource_id>/", methods=["GET", "POST"])
@expose("/explore/", methods=["GET", "POST"])
def explore(self, datasource_type=None, datasource_id=None):
user_id = g.user.get_id() if g.user else None
form_data, slc = get_form_data(use_slice_data=True)
if (
config["SIP_15_ENABLED"]
and slc
and g.user in slc.owners
and (
not form_data.get("time_range_endpoints")
or form_data["time_range_endpoints"]
!= (
utils.TimeRangeEndpoint.INCLUSIVE,
utils.TimeRangeEndpoint.EXCLUSIVE,
)
)
):
url = Href("/superset/explore/")(
{
"form_data": json.dumps(
{
"slice_id": slc.id,
"time_range_endpoints": (
utils.TimeRangeEndpoint.INCLUSIVE.value,
utils.TimeRangeEndpoint.EXCLUSIVE.value,
),
}
)
}
)
flash(Markup(config["SIP_15_TOAST_MESSAGE"].format(url=url)))
error_redirect = "/chart/list/"
try:
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data
)
except SupersetException:
return redirect(error_redirect)
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session
)
if not datasource:
flash(DATASOURCE_MISSING_ERR, "danger")
return redirect(error_redirect)
if config["ENABLE_ACCESS_REQUEST"] and (
not security_manager.datasource_access(datasource)
):
flash(
__(security_manager.get_datasource_access_error_msg(datasource)),
"danger",
)
return redirect(
"superset/request_access/?"
f"datasource_type={datasource_type}&"
f"datasource_id={datasource_id}&"
)
viz_type = form_data.get("viz_type")
if not viz_type and datasource.default_endpoint:
return redirect(datasource.default_endpoint)
slice_add_perm = security_manager.can_access("can_add", "SliceModelView")
slice_overwrite_perm = is_owner(slc, g.user)
slice_download_perm = security_manager.can_access(
"can_download", "SliceModelView"
)
form_data["datasource"] = str(datasource_id) + "__" + datasource_type
utils.convert_legacy_filters_into_adhoc(form_data)
utils.merge_extra_filters(form_data)
if request.method == "GET":
utils.merge_request_params(form_data, request.args)
action = request.args.get("action")
if action == "overwrite" and not slice_overwrite_perm:
return json_error_response(
_("You don't have the rights to ") + _("alter this ") + _("chart"),
status=400,
)
if action == "saveas" and not slice_add_perm:
return json_error_response(
_("You don't have the rights to ") + _("create a ") + _("chart"),
status=400,
)
if action in ("saveas", "overwrite"):
return self.save_or_overwrite_slice(
request.args,
slc,
slice_add_perm,
slice_overwrite_perm,
slice_download_perm,
datasource_id,
datasource_type,
datasource.name,
)
standalone = (
request.args.get(utils.ReservedUrlParameters.STANDALONE.value) == "true"
)
bootstrap_data = {
"can_add": slice_add_perm,
"can_download": slice_download_perm,
"can_overwrite": slice_overwrite_perm,
"datasource": datasource.data,
"form_data": form_data,
"datasource_id": datasource_id,
"datasource_type": datasource_type,
"slice": slc.data if slc else None,
"standalone": standalone,
"user_id": user_id,
"forced_height": request.args.get("height"),
"common": common_bootstrap_payload(),
}
table_name = (
datasource.table_name
if datasource_type == "table"
else datasource.datasource_name
)
if slc:
title = slc.slice_name
else:
title = _("Explore - %(table)s", table=table_name)
return self.render_template(
"superset/basic.html",
bootstrap_data=json.dumps(
bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser
),
entry="explore",
title=title,
standalone_mode=standalone,
)
@api
@handle_api_exception
@has_access_api
@expose("/filter/<datasource_type>/<datasource_id>/<column>/")
def filter(self, datasource_type, datasource_id, column):
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session
)
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
security_manager.assert_datasource_permission(datasource)
payload = json.dumps(
datasource.values_for_column(column, config["FILTER_SELECT_ROW_LIMIT"]),
default=utils.json_int_dttm_ser,
)
return json_success(payload)
def save_or_overwrite_slice(
self,
args,
slc,
slice_add_perm,
slice_overwrite_perm,
slice_download_perm,
datasource_id,
datasource_type,
datasource_name,
):
slice_name = args.get("slice_name")
action = args.get("action")
form_data = get_form_data()[0]
if action in ("saveas"):
if "slice_id" in form_data:
form_data.pop("slice_id")
slc = Slice(owners=[g.user] if g.user else [])
slc.params = json.dumps(form_data, indent=2, sort_keys=True)
slc.datasource_name = datasource_name
slc.viz_type = form_data["viz_type"]
slc.datasource_type = datasource_type
slc.datasource_id = datasource_id
slc.slice_name = slice_name
if action in ("saveas") and slice_add_perm:
self.save_slice(slc)
elif action == "overwrite" and slice_overwrite_perm:
self.overwrite_slice(slc)
# Adding slice to a dashboard if requested
dash = None
if request.args.get("add_to_dash") == "existing":
dash = (
db.session.query(Dashboard)
.filter_by(id=int(request.args.get("save_to_dashboard_id")))
.one()
)
# check edit dashboard permissions
dash_overwrite_perm = check_ownership(dash, raise_if_false=False)
if not dash_overwrite_perm:
return json_error_response(
_("You don't have the rights to ")
+ _("alter this ")
+ _("dashboard"),
status=400,
)
flash(
_("Chart [{}] was added to dashboard [{}]").format(
slc.slice_name, dash.dashboard_title
),
"info",
)
elif request.args.get("add_to_dash") == "new":
dash_add_perm = security_manager.can_access("can_add", "DashboardModelView")
if not dash_add_perm:
return json_error_response(
_("You don't have the rights to ")
+ _("create a ")
+ _("dashboard"),
status=400,
)
dash = Dashboard(
dashboard_title=request.args.get("new_dashboard_name"),
owners=[g.user] if g.user else [],
)
flash(
_(
"Dashboard [{}] just got created and chart [{}] was added " "to it"
).format(dash.dashboard_title, slc.slice_name),
"info",
)
if dash and slc not in dash.slices:
dash.slices.append(slc)
db.session.commit()
response = {
"can_add": slice_add_perm,
"can_download": slice_download_perm,
"can_overwrite": is_owner(slc, g.user),
"form_data": slc.form_data,
"slice": slc.data,
"dashboard_id": dash.id if dash else None,
}
if request.args.get("goto_dash") == "true":
response.update({"dashboard": dash.url})
return json_success(json.dumps(response))
def save_slice(self, slc):
session = db.session()
msg = _("Chart [{}] has been saved").format(slc.slice_name)
session.add(slc)
session.commit()
flash(msg, "info")
def overwrite_slice(self, slc):
session = db.session()
session.merge(slc)
session.commit()
msg = _("Chart [{}] has been overwritten").format(slc.slice_name)
flash(msg, "info")
@api
@has_access_api
@expose("/schemas/<db_id>/")
@expose("/schemas/<db_id>/<force_refresh>/")
def schemas(self, db_id, force_refresh="false"):
db_id = int(db_id)
force_refresh = force_refresh.lower() == "true"
database = db.session.query(models.Database).get(db_id)
if database:
schemas = database.get_all_schema_names(
cache=database.schema_cache_enabled,
cache_timeout=database.schema_cache_timeout,
force=force_refresh,
)
schemas = security_manager.schemas_accessible_by_user(database, schemas)
else:
schemas = []
return Response(json.dumps({"schemas": schemas}), mimetype="application/json")
@api
@has_access_api
@expose("/tables/<int:db_id>/<schema>/<substr>/")
@expose("/tables/<int:db_id>/<schema>/<substr>/<force_refresh>/")
def tables(
self, db_id: int, schema: str, substr: str, force_refresh: str = "false"
):
# Guarantees database filtering by security access
query = db.session.query(models.Database)
query = DatabaseFilter("id", SQLAInterface(models.Database, db.session)).apply(
query, None
)
database = query.filter_by(id=db_id).one_or_none()
if not database:
return json_error_response("Not found", 404)
force_refresh_parsed = force_refresh.lower() == "true"
schema_parsed = utils.parse_js_uri_path_item(schema, eval_undefined=True)
substr_parsed = utils.parse_js_uri_path_item(substr, eval_undefined=True)
if schema_parsed:
tables = (
database.get_all_table_names_in_schema(
schema=schema_parsed,
force=force_refresh_parsed,
cache=database.table_cache_enabled,
cache_timeout=database.table_cache_timeout,
)
or []
)
views = (
database.get_all_view_names_in_schema(
schema=schema_parsed,
force=force_refresh_parsed,
cache=database.table_cache_enabled,
cache_timeout=database.table_cache_timeout,
)
or []
)
else:
tables = database.get_all_table_names_in_database(
cache=True, force=False, cache_timeout=24 * 60 * 60
)
views = database.get_all_view_names_in_database(
cache=True, force=False, cache_timeout=24 * 60 * 60
)
tables = security_manager.get_datasources_accessible_by_user(
database, tables, schema_parsed
)
views = security_manager.get_datasources_accessible_by_user(
database, views, schema_parsed
)
def get_datasource_label(ds_name: utils.DatasourceName) -> str:
return (
ds_name.table if schema_parsed else f"{ds_name.schema}.{ds_name.table}"
)
if substr_parsed:
tables = [tn for tn in tables if substr_parsed in get_datasource_label(tn)]
views = [vn for vn in views if substr_parsed in get_datasource_label(vn)]
if not schema_parsed and database.default_schemas:
user_schema = g.user.email.split("@")[0]
valid_schemas = set(database.default_schemas + [user_schema])
tables = [tn for tn in tables if tn.schema in valid_schemas]
views = [vn for vn in views if vn.schema in valid_schemas]
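        # When a substring filter is applied, cap the returned options at
        # MAX_TABLE_NAMES, splitting the budget between tables and views in
        # proportion to how many of each matched.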
max_items = config["MAX_TABLE_NAMES"] or len(tables)
total_items = len(tables) + len(views)
max_tables = len(tables)
max_views = len(views)
if total_items and substr_parsed:
max_tables = max_items * len(tables) // total_items
max_views = max_items * len(views) // total_items
table_options = [
{
"value": tn.table,
"schema": tn.schema,
"label": get_datasource_label(tn),
"title": get_datasource_label(tn),
"type": "table",
}
for tn in tables[:max_tables]
]
table_options.extend(
[
{
"value": vn.table,
"schema": vn.schema,
"label": get_datasource_label(vn),
"title": get_datasource_label(vn),
"type": "view",
}
for vn in views[:max_views]
]
)
table_options.sort(key=lambda value: value["label"])
payload = {"tableLength": len(tables) + len(views), "options": table_options}
return json_success(json.dumps(payload))
@api
@has_access_api
@expose("/copy_dash/<dashboard_id>/", methods=["GET", "POST"])
def copy_dash(self, dashboard_id):
session = db.session()
data = json.loads(request.form.get("data"))
        dash = Dashboard()
original_dash = session.query(Dashboard).get(dashboard_id)
dash.owners = [g.user] if g.user else []
dash.dashboard_title = data["dashboard_title"]
if data["duplicate_slices"]:
# Duplicating slices as well, mapping old ids to new ones
old_to_new_sliceids = {}
for slc in original_dash.slices:
new_slice = slc.clone()
new_slice.owners = [g.user] if g.user else []
session.add(new_slice)
session.flush()
new_slice.dashboards.append(dash)
old_to_new_sliceids["{}".format(slc.id)] = "{}".format(new_slice.id)
# update chartId of layout entities
# in v2_dash positions json data, chartId should be integer,
# while in older version slice_id is string type
for value in data["positions"].values():
if (
isinstance(value, dict)
and value.get("meta")
and value.get("meta").get("chartId")
):
old_id = "{}".format(value.get("meta").get("chartId"))
new_id = int(old_to_new_sliceids[old_id])
value["meta"]["chartId"] = new_id
else:
dash.slices = original_dash.slices
dash.params = original_dash.params
self._set_dash_metadata(dash, data)
session.add(dash)
session.commit()
dash_json = json.dumps(dash.data)
session.close()
return json_success(dash_json)
@api
@has_access_api
@expose("/save_dash/<dashboard_id>/", methods=["GET", "POST"])
def save_dash(self, dashboard_id):
session = db.session()
dash = session.query(Dashboard).get(dashboard_id)
check_ownership(dash, raise_if_false=True)
data = json.loads(request.form.get("data"))
self._set_dash_metadata(dash, data)
session.merge(dash)
session.commit()
session.close()
return json_success(json.dumps({"status": "SUCCESS"}))
@staticmethod
def _set_dash_metadata(dashboard, data):
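        # Sync the dashboard with the posted layout: attach the charts
        # referenced in the position data, propagate renamed chart titles,
        # and persist layout plus metadata (CSS, filter scopes, refresh
        # frequency, default filters, color settings) as JSON.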
positions = data["positions"]
# find slices in the position data
slice_ids = []
slice_id_to_name = {}
for value in positions.values():
if isinstance(value, dict):
try:
slice_id = value["meta"]["chartId"]
slice_ids.append(slice_id)
slice_id_to_name[slice_id] = value["meta"]["sliceName"]
except KeyError:
pass
session = db.session()
current_slices = session.query(Slice).filter(Slice.id.in_(slice_ids)).all()
dashboard.slices = current_slices
# update slice names. this assumes user has permissions to update the slice
# we allow user set slice name be empty string
for slc in dashboard.slices:
try:
new_name = slice_id_to_name[slc.id]
if slc.slice_name != new_name:
slc.slice_name = new_name
session.merge(slc)
session.flush()
except KeyError:
pass
# remove leading and trailing white spaces in the dumped json
dashboard.position_json = json.dumps(
positions, indent=None, separators=(",", ":"), sort_keys=True
)
md = dashboard.params_dict
dashboard.css = data.get("css")
dashboard.dashboard_title = data["dashboard_title"]
if "timed_refresh_immune_slices" not in md:
md["timed_refresh_immune_slices"] = []
if "filter_scopes" in data:
md["filter_scopes"] = json.loads(data["filter_scopes"] or "{}")
md["expanded_slices"] = data["expanded_slices"]
md["refresh_frequency"] = data.get("refresh_frequency", 0)
default_filters_data = json.loads(data.get("default_filters", "{}"))
applicable_filters = {
key: v for key, v in default_filters_data.items() if int(key) in slice_ids
}
md["default_filters"] = json.dumps(applicable_filters)
if data.get("color_namespace"):
md["color_namespace"] = data.get("color_namespace")
if data.get("color_scheme"):
md["color_scheme"] = data.get("color_scheme")
if data.get("label_colors"):
md["label_colors"] = data.get("label_colors")
dashboard.json_metadata = json.dumps(md)
@api
@has_access_api
@expose("/add_slices/<dashboard_id>/", methods=["POST"])
def add_slices(self, dashboard_id):
data = json.loads(request.form.get("data"))
session = db.session()
dash = session.query(Dashboard).get(dashboard_id)
check_ownership(dash, raise_if_false=True)
new_slices = session.query(Slice).filter(Slice.id.in_(data["slice_ids"]))
dash.slices += new_slices
session.merge(dash)
session.commit()
session.close()
return "SLICES ADDED"
@api
@has_access_api
@expose("/testconn", methods=["POST", "GET"])
def testconn(self):
try:
db_name = request.json.get("name")
uri = request.json.get("uri")
            # If this database connection already exists, only its safe
            # (password-masked) URI is shown in the UI and passed back in the
            # form data. So if the form was submitted with the safe URI, we
            # retrieve the decrypted URI to test the connection instead.
if db_name:
existing_database = (
db.session.query(models.Database)
.filter_by(database_name=db_name)
.one_or_none()
)
if existing_database and uri == existing_database.safe_sqlalchemy_uri():
uri = existing_database.sqlalchemy_uri_decrypted
# this is the database instance that will be tested
database = models.Database(
# extras is sent as json, but required to be a string in the Database model
extra=json.dumps(request.json.get("extras", {})),
impersonate_user=request.json.get("impersonate_user"),
encrypted_extra=json.dumps(request.json.get("encrypted_extra", {})),
)
database.set_sqlalchemy_uri(uri)
username = g.user.username if g.user is not None else None
engine = database.get_sqla_engine(user_name=username)
with closing(engine.connect()) as conn:
conn.scalar(select([1]))
return json_success('"OK"')
except Exception as e:
logger.exception(e)
return json_error_response(
"Connection failed!\n\n" f"The error message returned was:\n{e}", 400
)
@api
@has_access_api
@expose("/recent_activity/<user_id>/", methods=["GET"])
def recent_activity(self, user_id):
M = models
if request.args.get("limit"):
limit = int(request.args.get("limit"))
else:
limit = 1000
qry = (
db.session.query(M.Log, M.Dashboard, Slice)
.outerjoin(M.Dashboard, M.Dashboard.id == M.Log.dashboard_id)
.outerjoin(Slice, Slice.id == M.Log.slice_id)
.filter(
and_(
~M.Log.action.in_(("queries", "shortner", "sql_json")),
M.Log.user_id == user_id,
)
)
.order_by(M.Log.dttm.desc())
.limit(limit)
)
payload = []
for log in qry.all():
item_url = None
item_title = None
if log.Dashboard:
item_url = log.Dashboard.url
item_title = log.Dashboard.dashboard_title
elif log.Slice:
item_url = log.Slice.slice_url
item_title = log.Slice.slice_name
payload.append(
{
"action": log.Log.action,
"item_url": item_url,
"item_title": item_title,
"time": log.Log.dttm,
}
)
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/csrf_token/", methods=["GET"])
def csrf_token(self):
return Response(
self.render_template("superset/csrf_token.json"), mimetype="text/json"
)
@api
@has_access_api
@expose("/available_domains/", methods=["GET"])
def available_domains(self):
return Response(
json.dumps(conf.get("SUPERSET_WEBSERVER_DOMAINS")), mimetype="text/json"
)
@api
@has_access_api
@expose("/fave_dashboards_by_username/<username>/", methods=["GET"])
def fave_dashboards_by_username(self, username):
user = security_manager.find_user(username=username)
return self.fave_dashboards(user.get_id())
@api
@has_access_api
@expose("/fave_dashboards/<user_id>/", methods=["GET"])
def fave_dashboards(self, user_id):
qry = (
db.session.query(Dashboard, models.FavStar.dttm)
.join(
models.FavStar,
and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == "Dashboard",
Dashboard.id == models.FavStar.obj_id,
),
)
.order_by(models.FavStar.dttm.desc())
)
payload = []
for o in qry.all():
d = {
"id": o.Dashboard.id,
"dashboard": o.Dashboard.dashboard_link(),
"title": o.Dashboard.dashboard_title,
"url": o.Dashboard.url,
"dttm": o.dttm,
}
if o.Dashboard.created_by:
user = o.Dashboard.created_by
d["creator"] = str(user)
d["creator_url"] = "/superset/profile/{}/".format(user.username)
payload.append(d)
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/created_dashboards/<user_id>/", methods=["GET"])
def created_dashboards(self, user_id):
Dash = Dashboard
qry = (
db.session.query(Dash)
.filter(or_(Dash.created_by_fk == user_id, Dash.changed_by_fk == user_id))
.order_by(Dash.changed_on.desc())
)
payload = [
{
"id": o.id,
"dashboard": o.dashboard_link(),
"title": o.dashboard_title,
"url": o.url,
"dttm": o.changed_on,
}
for o in qry.all()
]
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/user_slices", methods=["GET"])
@expose("/user_slices/<user_id>/", methods=["GET"])
def user_slices(self, user_id=None):
if not user_id:
user_id = g.user.id
FavStar = models.FavStar
qry = (
db.session.query(Slice, FavStar.dttm)
.join(
models.FavStar,
and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == "slice",
Slice.id == models.FavStar.obj_id,
),
isouter=True,
)
.filter(
or_(
Slice.created_by_fk == user_id,
Slice.changed_by_fk == user_id,
FavStar.user_id == user_id,
)
)
.order_by(Slice.slice_name.asc())
)
payload = [
{
"id": o.Slice.id,
"title": o.Slice.slice_name,
"url": o.Slice.slice_url,
"data": o.Slice.form_data,
"dttm": o.dttm if o.dttm else o.Slice.changed_on,
"viz_type": o.Slice.viz_type,
}
for o in qry.all()
]
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/created_slices", methods=["GET"])
@expose("/created_slices/<user_id>/", methods=["GET"])
def created_slices(self, user_id=None):
if not user_id:
user_id = g.user.id
qry = (
db.session.query(Slice)
.filter(or_(Slice.created_by_fk == user_id, Slice.changed_by_fk == user_id))
.order_by(Slice.changed_on.desc())
)
payload = [
{
"id": o.id,
"title": o.slice_name,
"url": o.slice_url,
"dttm": o.changed_on,
"viz_type": o.viz_type,
}
for o in qry.all()
]
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/fave_slices", methods=["GET"])
@expose("/fave_slices/<user_id>/", methods=["GET"])
def fave_slices(self, user_id=None):
if not user_id:
user_id = g.user.id
qry = (
db.session.query(Slice, models.FavStar.dttm)
.join(
models.FavStar,
and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == "slice",
Slice.id == models.FavStar.obj_id,
),
)
.order_by(models.FavStar.dttm.desc())
)
payload = []
for o in qry.all():
d = {
"id": o.Slice.id,
"title": o.Slice.slice_name,
"url": o.Slice.slice_url,
"dttm": o.dttm,
"viz_type": o.Slice.viz_type,
}
if o.Slice.created_by:
user = o.Slice.created_by
d["creator"] = str(user)
d["creator_url"] = "/superset/profile/{}/".format(user.username)
payload.append(d)
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose("/warm_up_cache/", methods=["GET"])
def warm_up_cache(self):
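        # Pre-compute the viz payload (force=True) for a single chart, or for
        # every chart built on the given table, so later requests are served
        # from cache.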
slices = None
session = db.session()
slice_id = request.args.get("slice_id")
table_name = request.args.get("table_name")
db_name = request.args.get("db_name")
if not slice_id and not (table_name and db_name):
return json_error_response(
__(
"Malformed request. slice_id or table_name and db_name "
"arguments are expected"
),
status=400,
)
if slice_id:
slices = session.query(Slice).filter_by(id=slice_id).all()
if not slices:
return json_error_response(
__("Chart %(id)s not found", id=slice_id), status=404
)
elif table_name and db_name:
SqlaTable = ConnectorRegistry.sources["table"]
table = (
session.query(SqlaTable)
.join(models.Database)
                .filter(
                    # Python `or` on SQLAlchemy expressions does not translate
                    # into SQL; filter on both conditions so the table is
                    # looked up within the given database.
                    models.Database.database_name == db_name,
                    SqlaTable.table_name == table_name,
                )
).one_or_none()
if not table:
return json_error_response(
__(
"Table %(t)s wasn't found in the database %(d)s",
t=table_name,
                        d=db_name,
),
status=404,
)
slices = (
session.query(Slice)
.filter_by(datasource_id=table.id, datasource_type=table.type)
.all()
)
for slc in slices:
try:
form_data = get_form_data(slc.id, use_slice_data=True)[0]
obj = get_viz(
datasource_type=slc.datasource.type,
datasource_id=slc.datasource.id,
form_data=form_data,
force=True,
)
obj.get_json()
except Exception as e:
logger.exception("Failed to warm up cache")
return json_error_response(utils.error_msg_from_exception(e))
return json_success(
json.dumps(
[{"slice_id": slc.id, "slice_name": slc.slice_name} for slc in slices]
)
)
@has_access_api
@expose("/favstar/<class_name>/<obj_id>/<action>/")
def favstar(self, class_name, obj_id, action):
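        # Toggle a favorite star on the given object: "select" adds a FavStar
        # row if none exists, "unselect" deletes existing rows, and any other
        # action simply reports the current count.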
session = db.session()
FavStar = models.FavStar
count = 0
favs = (
session.query(FavStar)
.filter_by(class_name=class_name, obj_id=obj_id, user_id=g.user.get_id())
.all()
)
if action == "select":
if not favs:
session.add(
FavStar(
class_name=class_name,
obj_id=obj_id,
user_id=g.user.get_id(),
dttm=datetime.now(),
)
)
count = 1
elif action == "unselect":
for fav in favs:
session.delete(fav)
else:
count = len(favs)
session.commit()
return json_success(json.dumps({"count": count}))
@api
@has_access_api
@expose("/dashboard/<dashboard_id>/published/", methods=("GET", "POST"))
def publish(self, dashboard_id):
logger.warning(
"This API endpoint is deprecated and will be removed in version 1.0.0"
)
session = db.session()
Role = ab_models.Role
dash = (
session.query(Dashboard).filter(Dashboard.id == dashboard_id).one_or_none()
)
admin_role = session.query(Role).filter(Role.name == "Admin").one_or_none()
if request.method == "GET":
if dash:
return json_success(json.dumps({"published": dash.published}))
else:
return json_error_response(
f"ERROR: cannot find dashboard {dashboard_id}", status=404
)
else:
edit_perm = is_owner(dash, g.user) or admin_role in get_user_roles()
if not edit_perm:
return json_error_response(
f'ERROR: "{g.user.username}" cannot alter dashboard "{dash.dashboard_title}"',
status=403,
)
dash.published = str(request.form["published"]).lower() == "true"
session.commit()
return json_success(json.dumps({"published": dash.published}))
@has_access
@expose("/dashboard/<dashboard_id>/")
def dashboard(self, dashboard_id):
session = db.session()
qry = session.query(Dashboard)
if dashboard_id.isdigit():
qry = qry.filter_by(id=int(dashboard_id))
else:
qry = qry.filter_by(slug=dashboard_id)
dash = qry.one_or_none()
if not dash:
abort(404)
datasources = set()
for slc in dash.slices:
datasource = slc.datasource
if datasource:
datasources.add(datasource)
if config["ENABLE_ACCESS_REQUEST"]:
for datasource in datasources:
if datasource and not security_manager.datasource_access(datasource):
flash(
__(
security_manager.get_datasource_access_error_msg(datasource)
),
"danger",
)
return redirect(
"superset/request_access/?" f"dashboard_id={dash.id}&"
)
dash_edit_perm = check_ownership(
dash, raise_if_false=False
) and security_manager.can_access("can_save_dash", "Superset")
dash_save_perm = security_manager.can_access("can_save_dash", "Superset")
superset_can_explore = security_manager.can_access("can_explore", "Superset")
superset_can_csv = security_manager.can_access("can_csv", "Superset")
slice_can_edit = security_manager.can_access("can_edit", "SliceModelView")
standalone_mode = (
request.args.get(utils.ReservedUrlParameters.STANDALONE.value) == "true"
)
edit_mode = (
request.args.get(utils.ReservedUrlParameters.EDIT_MODE.value) == "true"
)
@event_logger.log_this
def dashboard(**kwargs):
pass
dashboard(
dashboard_id=dash.id,
dashboard_version="v2",
dash_edit_perm=dash_edit_perm,
edit_mode=edit_mode,
)
dashboard_data = dash.data
dashboard_data.update(
{
"standalone_mode": standalone_mode,
"dash_save_perm": dash_save_perm,
"dash_edit_perm": dash_edit_perm,
"superset_can_explore": superset_can_explore,
"superset_can_csv": superset_can_csv,
"slice_can_edit": slice_can_edit,
}
)
url_params = {
key: value
for key, value in request.args.items()
if key not in [param.value for param in utils.ReservedUrlParameters]
}
bootstrap_data = {
"user_id": g.user.get_id(),
"dashboard_data": dashboard_data,
"datasources": {ds.uid: ds.data for ds in datasources},
"common": common_bootstrap_payload(),
"editMode": edit_mode,
"urlParams": url_params,
}
if request.args.get("json") == "true":
return json_success(
json.dumps(bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser)
)
return self.render_template(
"superset/dashboard.html",
entry="dashboard",
standalone_mode=standalone_mode,
title=dash.dashboard_title,
bootstrap_data=json.dumps(
bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser
),
)
@api
@event_logger.log_this
@expose("/log/", methods=["POST"])
def log(self):
return Response(status=200)
@has_access
@expose("/sync_druid/", methods=["POST"])
@event_logger.log_this
def sync_druid_source(self):
payload = request.get_json(force=True)
druid_config = payload["config"]
user_name = payload["user"]
cluster_name = payload["cluster"]
user = security_manager.find_user(username=user_name)
DruidDatasource = ConnectorRegistry.sources["druid"]
DruidCluster = DruidDatasource.cluster_class
if not user:
err_msg = __(
"Can't find User '%(name)s', please ask your admin " "to create one.",
name=user_name,
)
logger.error(err_msg)
return json_error_response(err_msg)
cluster = (
db.session.query(DruidCluster)
.filter_by(cluster_name=cluster_name)
.one_or_none()
)
if not cluster:
err_msg = __(
"Can't find DruidCluster with cluster_name = " "'%(name)s'",
name=cluster_name,
)
logger.error(err_msg)
return json_error_response(err_msg)
try:
DruidDatasource.sync_to_db_from_config(druid_config, user, cluster)
except Exception as e:
logger.exception(utils.error_msg_from_exception(e))
return json_error_response(utils.error_msg_from_exception(e))
return Response(status=201)
@has_access
@expose("/sqllab_viz/", methods=["POST"])
@event_logger.log_this
def sqllab_viz(self):
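        # Turn a SQL Lab query into an explorable datasource: create or reuse
        # a SqlaTable for the posted SQL, rebuild its columns from the posted
        # metadata, and give it a default count(*) metric.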
SqlaTable = ConnectorRegistry.sources["table"]
data = json.loads(request.form.get("data"))
table_name = data.get("datasourceName")
database_id = data.get("dbId")
table = (
db.session.query(SqlaTable)
.filter_by(database_id=database_id, table_name=table_name)
.one_or_none()
)
if not table:
table = SqlaTable(table_name=table_name, owners=[g.user])
table.database_id = database_id
table.schema = data.get("schema")
table.template_params = data.get("templateParams")
table.is_sqllab_view = True
q = ParsedQuery(data.get("sql"))
table.sql = q.stripped()
db.session.add(table)
cols = []
for config in data.get("columns"):
column_name = config.get("name")
SqlaTable = ConnectorRegistry.sources["table"]
TableColumn = SqlaTable.column_class
SqlMetric = SqlaTable.metric_class
col = TableColumn(
column_name=column_name,
filterable=True,
groupby=True,
is_dttm=config.get("is_date", False),
type=config.get("type", False),
)
cols.append(col)
table.columns = cols
table.metrics = [SqlMetric(metric_name="count", expression="count(*)")]
db.session.commit()
return json_success(json.dumps({"table_id": table.id}))
@has_access
@expose("/extra_table_metadata/<database_id>/<table_name>/<schema>/")
@event_logger.log_this
def extra_table_metadata(self, database_id, table_name, schema):
schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)
table_name = utils.parse_js_uri_path_item(table_name)
mydb = db.session.query(models.Database).filter_by(id=database_id).one()
payload = mydb.db_engine_spec.extra_table_metadata(mydb, table_name, schema)
return json_success(json.dumps(payload))
@has_access
@expose("/select_star/<database_id>/<table_name>")
@expose("/select_star/<database_id>/<table_name>/<schema>")
@event_logger.log_this
def select_star(self, database_id, table_name, schema=None):
logging.warning(
f"{self.__class__.__name__}.select_star "
"This API endpoint is deprecated and will be removed in version 1.0.0"
)
stats_logger.incr(f"{self.__class__.__name__}.select_star.init")
database = db.session.query(models.Database).get(database_id)
if not database:
stats_logger.incr(
f"deprecated.{self.__class__.__name__}.select_star.database_not_found"
)
return json_error_response("Not found", 404)
schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)
table_name = utils.parse_js_uri_path_item(table_name)
if not self.appbuilder.sm.can_access_datasource(database, table_name, schema):
stats_logger.incr(
f"deprecated.{self.__class__.__name__}.select_star.permission_denied"
)
logging.warning(
f"Permission denied for user {g.user} on table: {table_name} "
f"schema: {schema}"
)
return json_error_response("Not found", 404)
stats_logger.incr(f"deprecated.{self.__class__.__name__}.select_star.success")
return json_success(
database.select_star(
table_name, schema, latest_partition=True, show_cols=True
)
)
@has_access_api
@expose("/estimate_query_cost/<database_id>/", methods=["POST"])
@expose("/estimate_query_cost/<database_id>/<schema>/", methods=["POST"])
@event_logger.log_this
def estimate_query_cost(
self, database_id: int, schema: Optional[str] = None
) -> Response:
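        # The steps below: optionally render Jinja template parameters into the
        # SQL, ask the database's engine spec for a cost estimate (bounded by
        # SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT), then format the result with the
        # engine-specific formatter configured via QUERY_COST_FORMATTERS_BY_ENGINE,
        # falling back to the engine spec's default query_cost_formatter.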
mydb = db.session.query(models.Database).get(database_id)
sql = json.loads(request.form.get("sql", '""'))
template_params = json.loads(request.form.get("templateParams") or "{}")
if template_params:
template_processor = get_template_processor(mydb)
sql = template_processor.process_template(sql, **template_params)
timeout = SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT
timeout_msg = f"The estimation exceeded the {timeout} seconds timeout."
try:
with utils.timeout(seconds=timeout, error_message=timeout_msg):
cost = mydb.db_engine_spec.estimate_query_cost(
mydb, schema, sql, utils.sources.get("sql_lab")
)
except SupersetTimeoutException as e:
logger.exception(e)
return json_error_response(timeout_msg)
except Exception as e:
return json_error_response(str(e))
spec = mydb.db_engine_spec
query_cost_formatters = get_feature_flags().get(
"QUERY_COST_FORMATTERS_BY_ENGINE", {}
)
query_cost_formatter = query_cost_formatters.get(
spec.engine, spec.query_cost_formatter
)
cost = query_cost_formatter(cost)
return json_success(json.dumps(cost))
@expose("/theme/")
def theme(self):
return self.render_template("superset/theme.html")
@has_access_api
@expose("/results/<key>/")
@event_logger.log_this
def results(self, key):
return self.results_exec(key)
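    # results_exec: fetch the serialized result set from the results backend,
    # re-check table-level access for the owning query, deserialize the payload,
    # and optionally truncate it to the `rows` request argument.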
def results_exec(self, key: str):
if not results_backend:
return json_error_response("Results backend isn't configured")
read_from_results_backend_start = now_as_float()
blob = results_backend.get(key)
stats_logger.timing(
"sqllab.query.results_backend_read",
now_as_float() - read_from_results_backend_start,
)
if not blob:
return json_error_response(
"Data could not be retrieved. " "You may want to re-run the query.",
status=410,
)
query = db.session.query(Query).filter_by(results_key=key).one_or_none()
if query is None:
return json_error_response(
"Data could not be retrieved. You may want to re-run the query.",
status=404,
)
rejected_tables = security_manager.rejected_tables(
query.sql, query.database, query.schema
)
if rejected_tables:
return json_error_response(
security_manager.get_table_access_error_msg(rejected_tables), status=403
)
payload = utils.zlib_decompress(blob, decode=not results_backend_use_msgpack)
obj: dict = _deserialize_results_payload(
payload, query, cast(bool, results_backend_use_msgpack)
)
if "rows" in request.args:
try:
rows = int(request.args["rows"])
except ValueError:
return json_error_response("Invalid `rows` argument", status=400)
obj = apply_display_max_row_limit(obj, rows)
return json_success(
json.dumps(obj, default=utils.json_iso_dttm_ser, ignore_nan=True)
)
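    # stop_query retries up to five times with a one-second constant backoff,
    # rolling the session back before each retry and on giveup; presumably this
    # guards against transient races with the worker updating the same row.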
@has_access_api
@expose("/stop_query/", methods=["POST"])
@event_logger.log_this
@backoff.on_exception(
backoff.constant,
Exception,
interval=1,
on_backoff=lambda details: db.session.rollback(),
on_giveup=lambda details: db.session.rollback(),
max_tries=5,
)
def stop_query(self):
client_id = request.form.get("client_id")
query = db.session.query(Query).filter_by(client_id=client_id).one()
if query.status in [
QueryStatus.FAILED,
QueryStatus.SUCCESS,
QueryStatus.TIMED_OUT,
]:
logger.error(
f"Query with client_id {client_id} could not be stopped: query already complete"
)
return self.json_response("OK")
query.status = QueryStatus.STOPPED
db.session.commit()
return self.json_response("OK")
@has_access_api
@expose("/validate_sql_json/", methods=["POST", "GET"])
@event_logger.log_this
def validate_sql_json(self):
sql = request.form.get("sql")
database_id = request.form.get("database_id")
schema = request.form.get("schema") or None
template_params = json.loads(request.form.get("templateParams") or "{}")
if len(template_params) > 0:
# TODO: factor the Database object out of template rendering
# or provide it as mydb so we can render template params
# without having to also persist a Query ORM object.
return json_error_response(
"SQL validation does not support template parameters", status=400
)
session = db.session()
mydb = session.query(models.Database).filter_by(id=database_id).one_or_none()
if not mydb:
return json_error_response(
"Database with id {} is missing.".format(database_id), status=400
)
spec = mydb.db_engine_spec
validators_by_engine = get_feature_flags().get("SQL_VALIDATORS_BY_ENGINE")
if not validators_by_engine or spec.engine not in validators_by_engine:
return json_error_response(
"no SQL validator is configured for {}".format(spec.engine), status=400
)
validator_name = validators_by_engine[spec.engine]
validator = get_validator_by_name(validator_name)
if not validator:
return json_error_response(
"No validator named {} found (configured for the {} engine)".format(
validator_name, spec.engine
)
)
try:
timeout = config["SQLLAB_VALIDATION_TIMEOUT"]
timeout_msg = f"The query exceeded the {timeout} seconds timeout."
with utils.timeout(seconds=timeout, error_message=timeout_msg):
errors = validator.validate(sql, schema, mydb)
payload = json.dumps(
[err.to_dict() for err in errors],
default=utils.pessimistic_json_iso_dttm_ser,
ignore_nan=True,
encoding=None,
)
return json_success(payload)
except Exception as e:
logger.exception(e)
msg = _(
f"{validator.name} was unable to check your query.\n"
"Please recheck your query.\n"
f"Exception: {e}"
)
# Return as a 400 if the database error message says we got a 4xx error
if re.search(r"([\W]|^)4\d{2}([\W]|$)", str(e)):
return json_error_response(f"{msg}", status=400)
else:
return json_error_response(f"{msg}")
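    # _sql_json_async: hand the query off to a Celery worker and immediately
    # return a 202 response carrying the persisted query's metadata.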
def _sql_json_async(
self,
session: Session,
rendered_query: str,
query: Query,
expand_data: bool,
log_params: Optional[Dict[str, Any]] = None,
) -> str:
logger.info(f"Query {query.id}: Running query on a Celery worker")
        # Fire the Celery task without waiting on the returned future; blocking on it here could make the request time out.
try:
sql_lab.get_sql_results.delay(
query.id,
rendered_query,
return_results=False,
store_results=not query.select_as_cta,
user_name=g.user.username if g.user else None,
start_time=now_as_float(),
expand_data=expand_data,
log_params=log_params,
)
except Exception as e:
logger.exception(f"Query {query.id}: {e}")
msg = _(
"Failed to start remote query on a worker. "
"Tell your administrator to verify the availability of "
"the message queue."
)
query.status = QueryStatus.FAILED
query.error_message = msg
session.commit()
return json_error_response("{}".format(msg))
resp = json_success(
json.dumps(
{"query": query.to_dict()},
default=utils.json_int_dttm_ser,
ignore_nan=True,
),
status=202,
)
session.commit()
return resp
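    # _sql_json_sync: execute the query in-process, bounded by SQLLAB_TIMEOUT,
    # and return the full (row-limited) result payload in the response.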
def _sql_json_sync(
self,
session: Session,
rendered_query: str,
query: Query,
expand_data: bool,
log_params: Optional[Dict[str, Any]] = None,
) -> str:
try:
timeout = config["SQLLAB_TIMEOUT"]
timeout_msg = f"The query exceeded the {timeout} seconds timeout."
store_results = (
is_feature_enabled("SQLLAB_BACKEND_PERSISTENCE")
and not query.select_as_cta
)
with utils.timeout(seconds=timeout, error_message=timeout_msg):
# pylint: disable=no-value-for-parameter
data = sql_lab.get_sql_results(
query.id,
rendered_query,
return_results=True,
store_results=store_results,
user_name=g.user.username if g.user else None,
expand_data=expand_data,
log_params=log_params,
)
payload = json.dumps(
apply_display_max_row_limit(data),
default=utils.pessimistic_json_iso_dttm_ser,
ignore_nan=True,
encoding=None,
)
except Exception as e:
logger.exception(f"Query {query.id}: {e}")
            return json_error_response(f"{e}")
if data.get("status") == QueryStatus.FAILED:
return json_error_response(payload=data)
return json_success(payload)
@has_access_api
@expose("/sql_json/", methods=["POST"])
@event_logger.log_this
def sql_json(self):
log_params = {
"user_agent": cast(Optional[str], request.headers.get("USER_AGENT"))
}
return self.sql_json_exec(request.json, log_params)
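    # sql_json_exec: collect and validate the request parameters, persist a
    # Query record, enforce table-level access, render Jinja templates into the
    # SQL, apply row limits, and dispatch to the async or sync runner.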
def sql_json_exec(
self, query_params: dict, log_params: Optional[Dict[str, Any]] = None
):
# Collect Values
database_id: int = cast(int, query_params.get("database_id"))
schema: str = cast(str, query_params.get("schema"))
sql: str = cast(str, query_params.get("sql"))
try:
template_params: dict = json.loads(
query_params.get("templateParams") or "{}"
)
except json.JSONDecodeError:
logger.warning(
f"Invalid template parameter {query_params.get('templateParams')}"
" specified. Defaulting to empty dict"
)
template_params = {}
limit: int = query_params.get("queryLimit") or app.config["SQL_MAX_ROW"]
async_flag: bool = cast(bool, query_params.get("runAsync"))
if limit < 0:
logger.warning(
f"Invalid limit of {limit} specified. Defaulting to max limit."
)
limit = 0
select_as_cta: bool = cast(bool, query_params.get("select_as_cta"))
tmp_table_name: str = cast(str, query_params.get("tmp_table_name"))
client_id: str = cast(
str, query_params.get("client_id") or utils.shortid()[:10]
)
sql_editor_id: str = cast(str, query_params.get("sql_editor_id"))
tab_name: str = cast(str, query_params.get("tab"))
status: str = QueryStatus.PENDING if async_flag else QueryStatus.RUNNING
session = db.session()
mydb = session.query(models.Database).get(database_id)
if not mydb:
return json_error_response(f"Database with id {database_id} is missing.")
# Set tmp_table_name for CTA
if select_as_cta and mydb.force_ctas_schema:
tmp_table_name = f"{mydb.force_ctas_schema}.{tmp_table_name}"
# Save current query
query = Query(
database_id=database_id,
sql=sql,
schema=schema,
select_as_cta=select_as_cta,
start_time=now_as_float(),
tab_name=tab_name,
status=status,
sql_editor_id=sql_editor_id,
tmp_table_name=tmp_table_name,
user_id=g.user.get_id() if g.user else None,
client_id=client_id,
)
try:
session.add(query)
session.flush()
query_id = query.id
session.commit() # shouldn't be necessary
except SQLAlchemyError as e:
logger.error(f"Errors saving query details {e}")
session.rollback()
raise Exception(_("Query record was not created as expected."))
if not query_id:
raise Exception(_("Query record was not created as expected."))
logger.info(f"Triggering query_id: {query_id}")
rejected_tables = security_manager.rejected_tables(sql, mydb, schema)
if rejected_tables:
query.status = QueryStatus.FAILED
session.commit()
return json_error_response(
security_manager.get_table_access_error_msg(rejected_tables),
link=security_manager.get_table_access_link(rejected_tables),
status=403,
)
try:
template_processor = get_template_processor(
database=query.database, query=query
)
rendered_query = template_processor.process_template(
query.sql, **template_params
)
except Exception as e:
error_msg = utils.error_msg_from_exception(e)
return json_error_response(
f"Query {query_id}: Template rendering failed: {error_msg}"
)
limits = [mydb.db_engine_spec.get_limit_from_sql(rendered_query), limit]
query.limit = min(lim for lim in limits if lim is not None)
expand_data: bool = cast(
bool,
is_feature_enabled("PRESTO_EXPAND_DATA")
and query_params.get("expand_data"),
)
if async_flag:
return self._sql_json_async(
session, rendered_query, query, expand_data, log_params
)
return self._sql_json_sync(
session, rendered_query, query, expand_data, log_params
)
@has_access
@expose("/csv/<client_id>")
@event_logger.log_this
def csv(self, client_id):
logger.info("Exporting CSV file [{}]".format(client_id))
query = db.session.query(Query).filter_by(client_id=client_id).one()
rejected_tables = security_manager.rejected_tables(
query.sql, query.database, query.schema
)
if rejected_tables:
flash(security_manager.get_table_access_error_msg(rejected_tables))
return redirect("/")
blob = None
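        # Prefer the cached result set from the results backend when one exists
        # for this query; otherwise fall back to re-running the SQL below.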
if results_backend and query.results_key:
logger.info(
"Fetching CSV from results backend " "[{}]".format(query.results_key)
)
blob = results_backend.get(query.results_key)
if blob:
logger.info("Decompressing")
payload = utils.zlib_decompress(
blob, decode=not results_backend_use_msgpack
)
obj = _deserialize_results_payload(
payload, query, results_backend_use_msgpack
)
columns = [c["name"] for c in obj["columns"]]
df = pd.DataFrame.from_records(obj["data"], columns=columns)
logger.info("Using pandas to convert to CSV")
csv = df.to_csv(index=False, **config["CSV_EXPORT"])
else:
logger.info("Running a query to turn into CSV")
sql = query.select_sql or query.executed_sql
df = query.database.get_df(sql, query.schema)
csv = df.to_csv(index=False, **config["CSV_EXPORT"])
response = Response(csv, mimetype="text/csv")
response.headers[
"Content-Disposition"
] = f"attachment; filename={query.name}.csv"
event_info = {
"event_type": "data_export",
"client_id": client_id,
"row_count": len(df.index),
"database": query.database.name,
"schema": query.schema,
"sql": query.sql,
"exported_format": "csv",
}
logger.info(
f"CSV exported: {repr(event_info)}", extra={"superset_event": event_info}
)
return response
@api
@handle_api_exception
@has_access
@expose("/fetch_datasource_metadata")
@event_logger.log_this
def fetch_datasource_metadata(self):
datasource_id, datasource_type = request.args.get("datasourceKey").split("__")
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session
)
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
security_manager.assert_datasource_permission(datasource)
return json_success(json.dumps(datasource.data))
@has_access_api
@expose("/queries/<last_updated_ms>")
def queries(self, last_updated_ms):
last_updated_ms_int = int(float(last_updated_ms)) if last_updated_ms else 0
return self.queries_exec(last_updated_ms_int)
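    # queries_exec: return the current user's queries changed since the given
    # timestamp, keyed by client_id.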
def queries_exec(self, last_updated_ms_int: int):
stats_logger.incr("queries")
if not g.user.get_id():
return json_error_response(
"Please login to access the queries.", status=403
)
last_updated_dt = utils.EPOCH + timedelta(seconds=last_updated_ms_int / 1000)
sql_queries = (
db.session.query(Query)
.filter(
Query.user_id == g.user.get_id(), Query.changed_on >= last_updated_dt
)
.all()
)
dict_queries = {q.client_id: q.to_dict() for q in sql_queries}
return json_success(json.dumps(dict_queries, default=utils.json_int_dttm_ser))
@has_access
@expose("/search_queries")
@event_logger.log_this
def search_queries(self) -> Response:
query = db.session.query(Query)
if security_manager.can_access_all_queries():
search_user_id = request.args.get("user_id")
elif (
request.args.get("user_id") is not None
and request.args.get("user_id") != g.user.get_user_id()
):
return Response(status=403, mimetype="application/json")
else:
search_user_id = g.user.get_user_id()
database_id = request.args.get("database_id")
search_text = request.args.get("search_text")
status = request.args.get("status")
from_time = request.args.get("from")
to_time = request.args.get("to")
if search_user_id:
query = query.filter(Query.user_id == search_user_id)
if database_id:
query = query.filter(Query.database_id == database_id)
if status:
query = query.filter(Query.status == status)
if search_text:
query = query.filter(Query.sql.like("%{}%".format(search_text)))
if from_time:
query = query.filter(Query.start_time > int(from_time))
if to_time:
query = query.filter(Query.start_time < int(to_time))
query_limit = config["QUERY_SEARCH_LIMIT"]
sql_queries = query.order_by(Query.start_time.asc()).limit(query_limit).all()
dict_queries = [q.to_dict() for q in sql_queries]
return Response(
json.dumps(dict_queries, default=utils.json_int_dttm_ser),
status=200,
mimetype="application/json",
)
@app.errorhandler(500)
def show_traceback(self):
return (
render_template("superset/traceback.html", error_msg=get_error_msg()),
500,
)
@expose("/welcome")
def welcome(self):
if not g.user or not g.user.get_id():
return redirect(appbuilder.get_url_for_login)
welcome_dashboard_id = (
db.session.query(UserAttribute.welcome_dashboard_id)
.filter_by(user_id=g.user.get_id())
.scalar()
)
if welcome_dashboard_id:
return self.dashboard(str(welcome_dashboard_id))
payload = {
"user": bootstrap_user_data(g.user),
"common": common_bootstrap_payload(),
}
return self.render_template(
"superset/welcome.html",
entry="welcome",
bootstrap_data=json.dumps(
payload, default=utils.pessimistic_json_iso_dttm_ser
),
)
@has_access
@expose("/profile/<username>/")
def profile(self, username):
if not username and g.user:
username = g.user.username
user = (
db.session.query(ab_models.User).filter_by(username=username).one_or_none()
)
if not user:
abort(404, description=f"User: {username} does not exist.")
payload = {
"user": bootstrap_user_data(user, include_perms=True),
"common": common_bootstrap_payload(),
}
return self.render_template(
"superset/basic.html",
title=_("%(user)s's profile", user=username),
entry="profile",
bootstrap_data=json.dumps(
payload, default=utils.pessimistic_json_iso_dttm_ser
),
)
@staticmethod
def _get_sqllab_payload(user_id: int) -> Dict[str, Any]:
# send list of tab state ids
tabs_state = (
db.session.query(TabState.id, TabState.label)
.filter_by(user_id=user_id)
.all()
)
tab_state_ids = [tab_state[0] for tab_state in tabs_state]
        # return the first active tab, or fall back to another one if no tab is active
active_tab = (
db.session.query(TabState)
.filter_by(user_id=user_id)
.order_by(TabState.active.desc())
.first()
)
databases: Dict[int, Any] = {}
queries: Dict[str, Any] = {}
# These are unnecessary if sqllab backend persistence is disabled
if is_feature_enabled("SQLLAB_BACKEND_PERSISTENCE"):
databases = {
database.id: {
k: v for k, v in database.to_json().items() if k in DATABASE_KEYS
}
for database in db.session.query(models.Database).all()
}
# return all user queries associated with existing SQL editors
user_queries = (
db.session.query(Query)
.filter_by(user_id=user_id)
.filter(Query.sql_editor_id.cast(Integer).in_(tab_state_ids))
.all()
)
queries = {
query.client_id: {k: v for k, v in query.to_dict().items()}
for query in user_queries
}
return {
"defaultDbId": config["SQLLAB_DEFAULT_DBID"],
"common": common_bootstrap_payload(),
"tab_state_ids": tabs_state,
"active_tab": active_tab.to_dict() if active_tab else None,
"databases": databases,
"queries": queries,
}
@has_access
@expose("/sqllab")
def sqllab(self):
payload = self._get_sqllab_payload(g.user.get_id())
bootstrap_data = json.dumps(
payload, default=utils.pessimistic_json_iso_dttm_ser
)
return self.render_template(
"superset/basic.html", entry="sqllab", bootstrap_data=bootstrap_data
)
@api
@handle_api_exception
@has_access_api
@expose("/slice_query/<slice_id>/")
def slice_query(self, slice_id):
viz_obj = get_viz(slice_id)
security_manager.assert_viz_permission(viz_obj)
return self.get_query_string_response(viz_obj)
@api
@has_access_api
@expose("/schemas_access_for_csv_upload")
def schemas_access_for_csv_upload(self):
if not request.args.get("db_id"):
return json_error_response("No database is allowed for your csv upload")
db_id = int(request.args.get("db_id"))
database = db.session.query(models.Database).filter_by(id=db_id).one()
try:
schemas_allowed = database.get_schema_access_for_csv_upload()
if (
security_manager.database_access(database)
or security_manager.all_datasource_access()
):
return self.json_response(schemas_allowed)
# the list schemas_allowed should not be empty here
# and the list schemas_allowed_processed returned from security_manager
# should not be empty either,
# otherwise the database should have been filtered out
# in CsvToDatabaseForm
schemas_allowed_processed = security_manager.schemas_accessible_by_user(
database, schemas_allowed, False
)
return self.json_response(schemas_allowed_processed)
except Exception as e:
logger.exception(e)
return json_error_response(
"Failed to fetch schemas allowed for csv upload in this database! "
"Please contact your Superset Admin!"
)
class CssTemplateModelView(SupersetModelView, DeleteMixin):
datamodel = SQLAInterface(models.CssTemplate)
include_route_methods = RouteMethod.CRUD_SET
list_title = _("CSS Templates")
show_title = _("Show CSS Template")
add_title = _("Add CSS Template")
edit_title = _("Edit CSS Template")
list_columns = ["template_name"]
edit_columns = ["template_name", "css"]
add_columns = edit_columns
label_columns = {"template_name": _("Template Name")}
class CssTemplateAsyncModelView(CssTemplateModelView):
include_route_methods = {RouteMethod.API_READ}
list_columns = ["template_name", "css"]
@app.after_request
def apply_http_headers(response: Response):
# HTTP_HEADERS is deprecated, this provides backwards compatibility
response.headers.extend(
{**config["OVERRIDE_HTTP_HEADERS"], **config["HTTP_HEADERS"]}
)
for k, v in config["DEFAULT_HTTP_HEADERS"].items():
if k not in response.headers:
response.headers[k] = v
return response
| true
| true
|
1c4641077fa1b4a1700437711e9267173cfd5410
| 160
|
py
|
Python
|
lights/gridlight_off.py
|
bprevost/brad_demo
|
7c071709f763627d870e2b9e55be332e6af5f4c3
|
[
"MIT"
] | null | null | null |
lights/gridlight_off.py
|
bprevost/brad_demo
|
7c071709f763627d870e2b9e55be332e6af5f4c3
|
[
"MIT"
] | null | null | null |
lights/gridlight_off.py
|
bprevost/brad_demo
|
7c071709f763627d870e2b9e55be332e6af5f4c3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import board
import neopixel
NUMPIXELS = 50
pixels = neopixel.NeoPixel(board.D21, NUMPIXELS)
pixels.fill((0, 0, 0)) # Turn off pixels
| 16
| 48
| 0.73125
|
import board
import neopixel
NUMPIXELS = 50
pixels = neopixel.NeoPixel(board.D21, NUMPIXELS)
pixels.fill((0, 0, 0))
| true
| true
|
1c4641397d7bb7c30bef7cad7ee43801ba62d268
| 2,251
|
py
|
Python
|
dmgui_au/utilities/find_dropbox.py
|
Swanson-Hysell-Group/demag_gui_au
|
d1a233a82ec52dd5907bfee6885668a8c84ae892
|
[
"BSD-3-Clause"
] | null | null | null |
dmgui_au/utilities/find_dropbox.py
|
Swanson-Hysell-Group/demag_gui_au
|
d1a233a82ec52dd5907bfee6885668a8c84ae892
|
[
"BSD-3-Clause"
] | null | null | null |
dmgui_au/utilities/find_dropbox.py
|
Swanson-Hysell-Group/demag_gui_au
|
d1a233a82ec52dd5907bfee6885668a8c84ae892
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import shutil
try:
import json
except:
pass
def find_dropbox():
"""
Attempts to find local Dropbox folder using json file that Dropbox writes to
users' home directory. Will additionally search for `Hargraves_Data` folder
in the top directory (UC Berkeley Pmag Lab).
Returns
-------
    string
        Absolute path to the Dropbox folder (or its `Hargraves_Data` subfolder
        when present). If no Dropbox folder can be found, an empty string is
        returned.
    """
if os.path.isfile(os.path.expanduser(os.path.join("~", ".dropbox", "info.json"))):
drpbx_info_file = os.path.expanduser(os.path.join("~", ".dropbox", "info.json"))
drpbx_info = open(drpbx_info_file, 'r')
drpbx_json = drpbx_info.read()
drpbx_info.close()
try:
drpbx_dict = json.loads(drpbx_json)
except:
drpbx_dict = dict(eval(drpbx_json.replace('false','False').replace('true','True')))
finally:
drpbx_acts = list(drpbx_dict.keys())
if len(drpbx_acts)>1:
print("Found multiple Dropbox accounts:")
for i,j in enumerate(drpbx_acts):
print("[", i,"]", j)
                n = int(input("Which account to use? [index number]: "))  # cast so it can index the list
                drpbx_dict = drpbx_dict[drpbx_acts[n]]
else:
drpbx_dict = drpbx_dict[drpbx_acts[0]]
drpbx_path = os.path.abspath(drpbx_dict['path'])
else:
drpbx_path = ''
print("-W- There was a problem finding your Dropbox folder.")
return drpbx_path
# while not os.path.isdir(drpbx_path):
# drpbx_path = input("Please provide the path to your Dropbox, "
# "or press [Enter] to skip and provide a d.\n> ")
# if not drpbx_path:
# print("-E- Failed to find Dropbox folder")
# return drpbx_path
# elif os.path.isdir(os.path.realpath(os.path.expanduser(drpbx_path))):
# for UC Berkeley lab
if os.path.isdir(os.path.join(drpbx_path,"Hargraves_Data")):
drpbx_path = os.path.join(drpbx_path,"Hargraves_Data")
return drpbx_path
if __name__ == "__main__":
find_dropbox()
| 34.106061
| 95
| 0.58685
|
import os
import sys
import shutil
try:
import json
except:
pass
def find_dropbox():
if os.path.isfile(os.path.expanduser(os.path.join("~", ".dropbox", "info.json"))):
drpbx_info_file = os.path.expanduser(os.path.join("~", ".dropbox", "info.json"))
drpbx_info = open(drpbx_info_file, 'r')
drpbx_json = drpbx_info.read()
drpbx_info.close()
try:
drpbx_dict = json.loads(drpbx_json)
except:
drpbx_dict = dict(eval(drpbx_json.replace('false','False').replace('true','True')))
finally:
drpbx_acts = list(drpbx_dict.keys())
if len(drpbx_acts)>1:
print("Found multiple Dropbox accounts:")
for i,j in enumerate(drpbx_acts):
print("[", i,"]", j)
                n = int(input("Which account to use? [index number]: "))
drpbx_dict = drpbx_dict[drpbx_acts[n]]
else:
drpbx_dict = drpbx_dict[drpbx_acts[0]]
drpbx_path = os.path.abspath(drpbx_dict['path'])
else:
drpbx_path = ''
print("-W- There was a problem finding your Dropbox folder.")
return drpbx_path
if os.path.isdir(os.path.join(drpbx_path,"Hargraves_Data")):
drpbx_path = os.path.join(drpbx_path,"Hargraves_Data")
return drpbx_path
if __name__ == "__main__":
find_dropbox()
| true
| true
|
1c46440c615bf74cb4301b1593107054081dbfd6
| 440
|
py
|
Python
|
venv/Scripts/easy_install-script.py
|
TG-Techie/HackUMass0111
|
603344064605979b85a2e142caf7a2a7439d60f5
|
[
"MIT"
] | null | null | null |
venv/Scripts/easy_install-script.py
|
TG-Techie/HackUMass0111
|
603344064605979b85a2e142caf7a2a7439d60f5
|
[
"MIT"
] | 1
|
2019-10-19T09:24:56.000Z
|
2019-10-20T05:37:06.000Z
|
venv/Scripts/easy_install-script.py
|
TG-Techie/HackUMass0111
|
603344064605979b85a2e142caf7a2a7439d60f5
|
[
"MIT"
] | 1
|
2019-10-18T14:18:28.000Z
|
2019-10-18T14:18:28.000Z
|
#!C:\Users\danhi\hackumass0111\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| 33.846154
| 83
| 0.693182
|
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| true
| true
|
1c464563ae1c020a956ead49bce39b9e88737950
| 223
|
py
|
Python
|
cannes_accomodation/tests/test_accomodation.py
|
Xogiga/CPOA_INEC_SAVIGNY_VALADE
|
f33a9e9448f011bcc56abc0c2270bf0c3d9ae43a
|
[
"MIT"
] | null | null | null |
cannes_accomodation/tests/test_accomodation.py
|
Xogiga/CPOA_INEC_SAVIGNY_VALADE
|
f33a9e9448f011bcc56abc0c2270bf0c3d9ae43a
|
[
"MIT"
] | null | null | null |
cannes_accomodation/tests/test_accomodation.py
|
Xogiga/CPOA_INEC_SAVIGNY_VALADE
|
f33a9e9448f011bcc56abc0c2270bf0c3d9ae43a
|
[
"MIT"
] | null | null | null |
class TestAccomodation:
def test_list_accomodation(self, client):
response = client.get('/accomodation')
assert response.status_code == 200
    def test_update_accomodation(self, client):
pass
| 27.875
| 47
| 0.672646
|
class TestAccomodation:
def test_list_accomodation(self, client):
response = client.get('/accomodation')
assert response.status_code == 200
    def test_update_accomodation(self, client):
pass
| true
| true
|
1c4645726b27358a00869176f220b50c08f8f957
| 7,264
|
py
|
Python
|
client/log.py
|
diophung/pyre-check
|
a488698d86b06b550c0e6e133009c1f396925af2
|
[
"MIT"
] | null | null | null |
client/log.py
|
diophung/pyre-check
|
a488698d86b06b550c0e6e133009c1f396925af2
|
[
"MIT"
] | null | null | null |
client/log.py
|
diophung/pyre-check
|
a488698d86b06b550c0e6e133009c1f396925af2
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import io
import logging
import os
import re
import sys
import threading
import time
from typing import List # noqa
LOG = logging.getLogger(__name__)
PERFORMANCE = 15
PROMPT = 50
SUCCESS = 60
stdout = io.StringIO()
class Color:
YELLOW = "\033[33m"
RED = "\033[31m"
class Format:
BOLD = "\033[1m"
CLEAR_LINE = "\x1b[0G\x1b[K"
CLEAR = "\033[0m"
TRUNCATE_OVERFLOW = "\033[?7l"
WRAP_OVERFLOW = "\033[?7h"
NEWLINE = "\n"
CURSOR_UP_LINE = "\x1b[1A"
HIDE_CURSOR = "\x1b[?25l"
SHOW_CURSOR = "\x1b[?25h"
class Character:
LAMBDA = "ƛ"
class SectionFormatter(logging.Formatter):
def __init__(self) -> None:
super(SectionFormatter, self).__init__("%(asctime)s %(levelname)s %(message)s")
def format(self, record):
formatted = super(SectionFormatter, self).format(record)
return re.sub(r"DEBUG \[(.*)\]", r"\1", formatted)
class TimedStreamHandler(logging.StreamHandler):
THRESHOLD = 0.5
LINE_BREAKING_LEVELS = ["ERROR", "WARNING", "SUCCESS"]
_terminate = False # type: bool
_last_update = 0.0 # type: float
def __init__(self) -> None:
super(TimedStreamHandler, self).__init__()
self.setFormatter(logging.Formatter("%(message)s"))
self.terminator = ""
self.setLevel(logging.INFO)
self._record = None
self._last_record = None
self._active_lines = 0
# Preamble preparing terminal.
sys.stderr.write(
Format.NEWLINE
+ Format.TRUNCATE_OVERFLOW
+ Format.CLEAR_LINE
+ Format.CURSOR_UP_LINE
+ Format.HIDE_CURSOR
)
thread = threading.Thread(target=self._thread)
thread.daemon = True
thread.start()
def clear_lines(self):
if self._active_lines == 0:
return ""
return Format.CLEAR_LINE + "".join(
[
Format.CURSOR_UP_LINE + Format.CLEAR_LINE
for n in range(self._active_lines - 1)
]
)
def emit(self, record, age=None) -> None:
self._last_record = record
suffix = ""
color = ""
active_lines = record.msg.count("\n") + 1
if record.levelname in self.LINE_BREAKING_LEVELS:
record.msg += "\n"
if record.levelname == "ERROR":
color = Color.RED
self._record = None
active_lines = 0
elif record.levelname == "WARNING":
color = Color.YELLOW
self._record = None
active_lines = 0
elif record.levelname == "PROMPT":
color = Color.YELLOW
self._record = None
active_lines = 0
elif record.levelname == "SUCCESS":
self._record = None
active_lines = 0
elif age:
if age > 10:
color = Color.YELLOW
if age > 30:
color = Color.RED
suffix = " {}[{:.1f}s]{}".format(
color if color else "", age, Format.CLEAR if color else ""
)
else:
self._record = record
self._last_update = time.time()
timed_record = copy.copy(record)
timed_record.msg = (
"{clear_line}{color} {cursor}{clear} " "{message}{suffix}"
).format(
clear_line=self.clear_lines(),
color=color,
cursor=Character.LAMBDA,
clear=Format.CLEAR,
message=record.msg,
suffix=suffix,
)
self._active_lines = active_lines
super(TimedStreamHandler, self).emit(timed_record)
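    # A daemon thread re-emits the most recent in-progress record with an
    # elapsed-time suffix once it has been on screen longer than THRESHOLD seconds.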
def _thread(self) -> None:
while not self._terminate:
if self._record:
age = time.time() - self._last_update
if age > self.THRESHOLD:
self.emit(self._record, age)
time.sleep(0.1)
def terminate(self) -> None:
last_record = self._last_record
if last_record and last_record.levelname not in self.LINE_BREAKING_LEVELS:
sys.stderr.write("\n")
# Reset terminal.
sys.stderr.write(Format.WRAP_OVERFLOW + Format.SHOW_CURSOR)
sys.stderr.flush()
self._terminate = True
def initialize(arguments) -> None:
if arguments.noninteractive:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(SectionFormatter())
stream_handler.setLevel(logging.DEBUG)
arguments.timed_stream_handler = None
else:
stream_handler = TimedStreamHandler()
arguments.timed_stream_handler = stream_handler
handlers = [stream_handler] # type: List[logging.Handler]
if not arguments.noninteractive:
try:
os.mkdir(".pyre")
except FileExistsError:
pass
file_handler = logging.FileHandler(".pyre/pyre.stderr")
file_handler.setFormatter(SectionFormatter())
file_handler.setLevel(logging.DEBUG)
handlers.append(file_handler)
logging.addLevelName(PERFORMANCE, "PERFORMANCE")
logging.addLevelName(PROMPT, "PROMPT")
logging.addLevelName(SUCCESS, "SUCCESS")
logging.basicConfig(level=logging.DEBUG, handlers=handlers)
def cleanup(arguments) -> None:
if arguments.timed_stream_handler:
arguments.timed_stream_handler.terminate()
output = stdout.getvalue()
if output:
sys.stdout.write(output + "\n")
class Buffer:
THRESHOLD = 0.1
_flushed = False # type: bool
def __init__(self, section, data) -> None:
self._section = section
self._data = data
self._lock = threading.RLock()
thread = threading.Thread(target=self._thread)
thread.daemon = True
thread.start()
def append(self, line) -> None:
self._data.append(line)
def flush(self) -> None:
with self._lock:
if self._flushed is True:
return
self._flushed = True
message = "\n".join(self._data)
if self._section == "ERROR":
LOG.error(message)
elif self._section == "INFO":
LOG.info(message)
elif self._section == "WARNING":
LOG.warning(message)
elif self._section == "PROGRESS":
LOG.info(message)
elif self._section == "PARSER":
LOG.error(message)
else:
LOG.debug("[%s] %s", self._section, message)
def _thread(self) -> None:
time.sleep(self.THRESHOLD)
with self._lock:
if not self._flushed:
self.flush()
def get_yes_no_input(prompt: str) -> bool:
choice = get_input(prompt, suffix=" [Y/n] ")
return choice.lower() in ["", "y", "ye", "yes"]
def get_optional_input(prompt: str, default: str) -> str:
result = get_input(prompt, suffix=" (Default: `{}`): ".format(default))
if result == "":
return default
return result
def get_input(prompt: str, suffix: str = "") -> str:
LOG.log(PROMPT, prompt + suffix)
return input().strip()
| 27.938462
| 87
| 0.58549
|
import copy
import io
import logging
import os
import re
import sys
import threading
import time
from typing import List
LOG = logging.getLogger(__name__)
PERFORMANCE = 15
PROMPT = 50
SUCCESS = 60
stdout = io.StringIO()
class Color:
YELLOW = "\033[33m"
RED = "\033[31m"
class Format:
BOLD = "\033[1m"
CLEAR_LINE = "\x1b[0G\x1b[K"
CLEAR = "\033[0m"
TRUNCATE_OVERFLOW = "\033[?7l"
WRAP_OVERFLOW = "\033[?7h"
NEWLINE = "\n"
CURSOR_UP_LINE = "\x1b[1A"
HIDE_CURSOR = "\x1b[?25l"
SHOW_CURSOR = "\x1b[?25h"
class Character:
LAMBDA = "ƛ"
class SectionFormatter(logging.Formatter):
def __init__(self) -> None:
super(SectionFormatter, self).__init__("%(asctime)s %(levelname)s %(message)s")
def format(self, record):
formatted = super(SectionFormatter, self).format(record)
return re.sub(r"DEBUG \[(.*)\]", r"\1", formatted)
class TimedStreamHandler(logging.StreamHandler):
THRESHOLD = 0.5
LINE_BREAKING_LEVELS = ["ERROR", "WARNING", "SUCCESS"]
_terminate = False
_last_update = 0.0
def __init__(self) -> None:
super(TimedStreamHandler, self).__init__()
self.setFormatter(logging.Formatter("%(message)s"))
self.terminator = ""
self.setLevel(logging.INFO)
self._record = None
self._last_record = None
self._active_lines = 0
sys.stderr.write(
Format.NEWLINE
+ Format.TRUNCATE_OVERFLOW
+ Format.CLEAR_LINE
+ Format.CURSOR_UP_LINE
+ Format.HIDE_CURSOR
)
thread = threading.Thread(target=self._thread)
thread.daemon = True
thread.start()
def clear_lines(self):
if self._active_lines == 0:
return ""
return Format.CLEAR_LINE + "".join(
[
Format.CURSOR_UP_LINE + Format.CLEAR_LINE
for n in range(self._active_lines - 1)
]
)
def emit(self, record, age=None) -> None:
self._last_record = record
suffix = ""
color = ""
active_lines = record.msg.count("\n") + 1
if record.levelname in self.LINE_BREAKING_LEVELS:
record.msg += "\n"
if record.levelname == "ERROR":
color = Color.RED
self._record = None
active_lines = 0
elif record.levelname == "WARNING":
color = Color.YELLOW
self._record = None
active_lines = 0
elif record.levelname == "PROMPT":
color = Color.YELLOW
self._record = None
active_lines = 0
elif record.levelname == "SUCCESS":
self._record = None
active_lines = 0
elif age:
if age > 10:
color = Color.YELLOW
if age > 30:
color = Color.RED
suffix = " {}[{:.1f}s]{}".format(
color if color else "", age, Format.CLEAR if color else ""
)
else:
self._record = record
self._last_update = time.time()
timed_record = copy.copy(record)
timed_record.msg = (
"{clear_line}{color} {cursor}{clear} " "{message}{suffix}"
).format(
clear_line=self.clear_lines(),
color=color,
cursor=Character.LAMBDA,
clear=Format.CLEAR,
message=record.msg,
suffix=suffix,
)
self._active_lines = active_lines
super(TimedStreamHandler, self).emit(timed_record)
def _thread(self) -> None:
while not self._terminate:
if self._record:
age = time.time() - self._last_update
if age > self.THRESHOLD:
self.emit(self._record, age)
time.sleep(0.1)
def terminate(self) -> None:
last_record = self._last_record
if last_record and last_record.levelname not in self.LINE_BREAKING_LEVELS:
sys.stderr.write("\n")
sys.stderr.write(Format.WRAP_OVERFLOW + Format.SHOW_CURSOR)
sys.stderr.flush()
self._terminate = True
def initialize(arguments) -> None:
if arguments.noninteractive:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(SectionFormatter())
stream_handler.setLevel(logging.DEBUG)
arguments.timed_stream_handler = None
else:
stream_handler = TimedStreamHandler()
arguments.timed_stream_handler = stream_handler
handlers = [stream_handler]
if not arguments.noninteractive:
try:
os.mkdir(".pyre")
except FileExistsError:
pass
file_handler = logging.FileHandler(".pyre/pyre.stderr")
file_handler.setFormatter(SectionFormatter())
file_handler.setLevel(logging.DEBUG)
handlers.append(file_handler)
logging.addLevelName(PERFORMANCE, "PERFORMANCE")
logging.addLevelName(PROMPT, "PROMPT")
logging.addLevelName(SUCCESS, "SUCCESS")
logging.basicConfig(level=logging.DEBUG, handlers=handlers)
def cleanup(arguments) -> None:
if arguments.timed_stream_handler:
arguments.timed_stream_handler.terminate()
output = stdout.getvalue()
if output:
sys.stdout.write(output + "\n")
class Buffer:
THRESHOLD = 0.1
_flushed = False
def __init__(self, section, data) -> None:
self._section = section
self._data = data
self._lock = threading.RLock()
thread = threading.Thread(target=self._thread)
thread.daemon = True
thread.start()
def append(self, line) -> None:
self._data.append(line)
def flush(self) -> None:
with self._lock:
if self._flushed is True:
return
self._flushed = True
message = "\n".join(self._data)
if self._section == "ERROR":
LOG.error(message)
elif self._section == "INFO":
LOG.info(message)
elif self._section == "WARNING":
LOG.warning(message)
elif self._section == "PROGRESS":
LOG.info(message)
elif self._section == "PARSER":
LOG.error(message)
else:
LOG.debug("[%s] %s", self._section, message)
def _thread(self) -> None:
time.sleep(self.THRESHOLD)
with self._lock:
if not self._flushed:
self.flush()
def get_yes_no_input(prompt: str) -> bool:
choice = get_input(prompt, suffix=" [Y/n] ")
return choice.lower() in ["", "y", "ye", "yes"]
def get_optional_input(prompt: str, default: str) -> str:
result = get_input(prompt, suffix=" (Default: `{}`): ".format(default))
if result == "":
return default
return result
def get_input(prompt: str, suffix: str = "") -> str:
LOG.log(PROMPT, prompt + suffix)
return input().strip()
| true
| true
|
1c4645c2167dbdc5384e8afc9db3098d68ffbf3f
| 3,666
|
py
|
Python
|
scripts/launch_test.py
|
amarildolikmeta/oac-explore
|
e3d63992a4ff33c8df593941f498457e94f81eb8
|
[
"MIT"
] | null | null | null |
scripts/launch_test.py
|
amarildolikmeta/oac-explore
|
e3d63992a4ff33c8df593941f498457e94f81eb8
|
[
"MIT"
] | null | null | null |
scripts/launch_test.py
|
amarildolikmeta/oac-explore
|
e3d63992a4ff33c8df593941f498457e94f81eb8
|
[
"MIT"
] | 1
|
2021-12-13T15:38:41.000Z
|
2021-12-13T15:38:41.000Z
|
import json
import sys
sys.path.append("../")
from trainer.particle_trainer import ParticleTrainer
from trainer.gaussian_trainer import GaussianTrainer
from trainer.trainer import SACTrainer
import numpy as np
import torch
from main import env_producer, get_policy_producer, get_q_producer
from utils.pythonplusplus import load_gzip_pickle
ts = '1584884279.5007188'
ts = '1589352957.4422379'
iter = 190
path = '../data/point/sac_/' + ts
ts = '1590677750.0582957'
path = '../data/point/mean_update_counts/p-oac_/' + ts
ts = '1595343877.9346888'
path = '../data/point/hard/terminal/ddpgcounts/p-oac_/no_bias/' + ts
restore = True
variant = json.load(open(path + '/variant.json', 'r'))
domain = variant['domain']
seed = variant['seed']
r_max = variant['r_max']
ensemble = variant['ensemble']
delta = variant['delta']
n_estimators = variant['n_estimators']
if seed == 0:
np.random.seed()
seed = np.random.randint(0, 1000000)
torch.manual_seed(seed)
np.random.seed(seed)
env_args = {}
if domain in ['riverswim']:
env_args['dim'] = variant['dim']
if domain in ['point']:
env_args['difficulty'] = variant['difficulty']
env_args['max_state'] = variant['max_state']
env_args['clip_state'] = variant['clip_state']
env_args['terminal'] = variant['terminal']
expl_env = env_producer(domain, seed, **env_args)
eval_env = env_producer(domain, seed * 10 + 1, **env_args)
obs_dim = expl_env.observation_space.low.size
action_dim = expl_env.action_space.low.size
# Get producer function for policy and value functions
M = variant['layer_size']
N = variant['num_layers']
alg = variant['alg']
if alg in ['p-oac', 'g-oac', 'g-tsac', 'p-tsac'] and variant['share_layers']:
output_size = n_estimators
n_estimators = 1
else:
output_size = 1
ob = expl_env.reset()
print(ob)
q_producer = get_q_producer(obs_dim, action_dim, hidden_sizes=[M] * N, output_size=output_size)
policy_producer = get_policy_producer(
obs_dim, action_dim, hidden_sizes=[M] * N)
q_min = variant['r_min'] / (1 - variant['trainer_kwargs']['discount'])
q_max = variant['r_max'] / (1 - variant['trainer_kwargs']['discount'])
alg_to_trainer = {
'sac': SACTrainer,
'oac': SACTrainer,
'p-oac': ParticleTrainer,
'g-oac': GaussianTrainer
}
trainer = alg_to_trainer[variant['alg']]
kwargs = {}
if alg in ['p-oac', 'g-oac', 'g-tsac', 'p-tsac']:
n_estimators = variant['n_estimators']
kwargs = dict(
n_estimators=n_estimators,
delta=variant['delta'],
q_min=q_min,
q_max=q_max,
ensemble=variant['ensemble'],
n_policies=variant['n_policies'],
)
kwargs.update(dict(
policy_producer=policy_producer,
q_producer=q_producer,
action_space=expl_env.action_space,
))
print(kwargs)
kwargs.update(variant['trainer_kwargs'])
trainer = trainer(**kwargs)
# try:
# experiment = path + '/best.zip_pkl'
# exp = load_gzip_pickle(experiment)
# print(exp['epoch'])
# trainer.restore_from_snapshot(exp['trainer'])
# except:
experiment = path + '/params.zip_pkl'
exp = load_gzip_pickle(experiment)
print(exp['epoch'])
trainer.restore_from_snapshot(exp['trainer'])
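# Roll out the restored policy for 10 episodes of at most 400 steps each,
# acting deterministically and rendering every step.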
for i in range(10):
s = expl_env.reset()
done = False
ret = 0
t = 0
while not done and t < 400:
expl_env.render()
if hasattr(trainer, 'target_policy'):
a, agent_info = trainer.target_policy.get_action(s, deterministic=True)
else:
a, agent_info = trainer.policy.get_action(s, deterministic=True)
s, r, done, _ = expl_env.step(a)
t += 1
ret += r
expl_env.render()
print("Return: ", ret)
input()
| 30.04918
| 95
| 0.678396
|
import json
import sys
sys.path.append("../")
from trainer.particle_trainer import ParticleTrainer
from trainer.gaussian_trainer import GaussianTrainer
from trainer.trainer import SACTrainer
import numpy as np
import torch
from main import env_producer, get_policy_producer, get_q_producer
from utils.pythonplusplus import load_gzip_pickle
ts = '1584884279.5007188'
ts = '1589352957.4422379'
iter = 190
path = '../data/point/sac_/' + ts
ts = '1590677750.0582957'
path = '../data/point/mean_update_counts/p-oac_/' + ts
ts = '1595343877.9346888'
path = '../data/point/hard/terminal/ddpgcounts/p-oac_/no_bias/' + ts
restore = True
variant = json.load(open(path + '/variant.json', 'r'))
domain = variant['domain']
seed = variant['seed']
r_max = variant['r_max']
ensemble = variant['ensemble']
delta = variant['delta']
n_estimators = variant['n_estimators']
if seed == 0:
np.random.seed()
seed = np.random.randint(0, 1000000)
torch.manual_seed(seed)
np.random.seed(seed)
env_args = {}
if domain in ['riverswim']:
env_args['dim'] = variant['dim']
if domain in ['point']:
env_args['difficulty'] = variant['difficulty']
env_args['max_state'] = variant['max_state']
env_args['clip_state'] = variant['clip_state']
env_args['terminal'] = variant['terminal']
expl_env = env_producer(domain, seed, **env_args)
eval_env = env_producer(domain, seed * 10 + 1, **env_args)
obs_dim = expl_env.observation_space.low.size
action_dim = expl_env.action_space.low.size
M = variant['layer_size']
N = variant['num_layers']
alg = variant['alg']
if alg in ['p-oac', 'g-oac', 'g-tsac', 'p-tsac'] and variant['share_layers']:
output_size = n_estimators
n_estimators = 1
else:
output_size = 1
ob = expl_env.reset()
print(ob)
q_producer = get_q_producer(obs_dim, action_dim, hidden_sizes=[M] * N, output_size=output_size)
policy_producer = get_policy_producer(
obs_dim, action_dim, hidden_sizes=[M] * N)
q_min = variant['r_min'] / (1 - variant['trainer_kwargs']['discount'])
q_max = variant['r_max'] / (1 - variant['trainer_kwargs']['discount'])
alg_to_trainer = {
'sac': SACTrainer,
'oac': SACTrainer,
'p-oac': ParticleTrainer,
'g-oac': GaussianTrainer
}
trainer = alg_to_trainer[variant['alg']]
kwargs = {}
if alg in ['p-oac', 'g-oac', 'g-tsac', 'p-tsac']:
n_estimators = variant['n_estimators']
kwargs = dict(
n_estimators=n_estimators,
delta=variant['delta'],
q_min=q_min,
q_max=q_max,
ensemble=variant['ensemble'],
n_policies=variant['n_policies'],
)
kwargs.update(dict(
policy_producer=policy_producer,
q_producer=q_producer,
action_space=expl_env.action_space,
))
print(kwargs)
kwargs.update(variant['trainer_kwargs'])
trainer = trainer(**kwargs)
experiment = path + '/params.zip_pkl'
exp = load_gzip_pickle(experiment)
print(exp['epoch'])
trainer.restore_from_snapshot(exp['trainer'])
for i in range(10):
s = expl_env.reset()
done = False
ret = 0
t = 0
while not done and t < 400:
expl_env.render()
if hasattr(trainer, 'target_policy'):
a, agent_info = trainer.target_policy.get_action(s, deterministic=True)
else:
a, agent_info = trainer.policy.get_action(s, deterministic=True)
s, r, done, _ = expl_env.step(a)
t += 1
ret += r
expl_env.render()
print("Return: ", ret)
input()
| true
| true
|
1c464629dbe7ff667eaf19f42c16ee577f2ed4fd
| 1,277
|
py
|
Python
|
Echoo/echoo.py
|
UsedToBe97/Echoo
|
b08069170bf470415b9fd91fcb943214b69805b8
|
[
"MIT"
] | null | null | null |
Echoo/echoo.py
|
UsedToBe97/Echoo
|
b08069170bf470415b9fd91fcb943214b69805b8
|
[
"MIT"
] | null | null | null |
Echoo/echoo.py
|
UsedToBe97/Echoo
|
b08069170bf470415b9fd91fcb943214b69805b8
|
[
"MIT"
] | null | null | null |
# import logging
# logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
# level=logging.INFO)
import os
import argparse
import telegram
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
def main(token, chat_id, msg):
bot = telegram.Bot(token=token)
bot.send_message(chat_id=chat_id, text=msg)
def run():
    parser = argparse.ArgumentParser(description=r'''Echoo:: A tool that lets your program echo.''')
parser.add_argument("-t", "--token", default=None, type=str, help="Token for your bot.")
parser.add_argument("-id", "--chat_id", default=None, type=str, help="Chat_id of your audience.")
parser.add_argument("msg", default="Are u ok?", type=str, help="Message to send")
args = parser.parse_args()
if args.token is None:
try:
args.token = os.environ["TG_TOKEN"]
except KeyError:
raise KeyError("Neither --token nor TG_TOKEN is set.")
if args.chat_id is None:
try:
args.chat_id = os.environ["TG_CHAT_ID"]
except KeyError:
raise KeyError("Neither --chat_id nor TG_CHAT_ID is set.")
main(token=args.token, chat_id=args.chat_id, msg=args.msg)
if __name__ == '__main__':
run()
| 30.404762
| 101
| 0.648395
|
import os
import argparse
import telegram
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
def main(token, chat_id, msg):
bot = telegram.Bot(token=token)
bot.send_message(chat_id=chat_id, text=msg)
def run():
    parser = argparse.ArgumentParser(description=r'''Echoo:: A tool that lets your program echo.''')
parser.add_argument("-t", "--token", default=None, type=str, help="Token for your bot.")
parser.add_argument("-id", "--chat_id", default=None, type=str, help="Chat_id of your audience.")
parser.add_argument("msg", default="Are u ok?", type=str, help="Message to send")
args = parser.parse_args()
if args.token is None:
try:
args.token = os.environ["TG_TOKEN"]
except KeyError:
raise KeyError("Neither --token nor TG_TOKEN is set.")
if args.chat_id is None:
try:
args.chat_id = os.environ["TG_CHAT_ID"]
except KeyError:
raise KeyError("Neither --chat_id nor TG_CHAT_ID is set.")
main(token=args.token, chat_id=args.chat_id, msg=args.msg)
if __name__ == '__main__':
run()
| true
| true
|
1c464866312c86c67ec166f6a47982af30b5e1bc
| 9,752
|
py
|
Python
|
src/v5.1/resources/swagger_client/models/tpdm_teacher_candidate_academic_record_reference.py
|
xmarcosx/edfi-notebook
|
0564ebdf1d0f45a9d25056e7e61369f0a837534d
|
[
"Apache-2.0"
] | 2
|
2021-04-27T17:18:17.000Z
|
2021-04-27T19:14:39.000Z
|
src/v5.1/resources/swagger_client/models/tpdm_teacher_candidate_academic_record_reference.py
|
xmarcosx/edfi-notebook
|
0564ebdf1d0f45a9d25056e7e61369f0a837534d
|
[
"Apache-2.0"
] | null | null | null |
src/v5.1/resources/swagger_client/models/tpdm_teacher_candidate_academic_record_reference.py
|
xmarcosx/edfi-notebook
|
0564ebdf1d0f45a9d25056e7e61369f0a837534d
|
[
"Apache-2.0"
] | 1
|
2022-01-06T09:43:11.000Z
|
2022-01-06T09:43:11.000Z
|
# coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class TpdmTeacherCandidateAcademicRecordReference(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'education_organization_id': 'int',
'school_year': 'int',
'teacher_candidate_identifier': 'str',
'term_descriptor': 'str',
'link': 'Link'
}
attribute_map = {
'education_organization_id': 'educationOrganizationId',
'school_year': 'schoolYear',
'teacher_candidate_identifier': 'teacherCandidateIdentifier',
'term_descriptor': 'termDescriptor',
'link': 'link'
}
def __init__(self, education_organization_id=None, school_year=None, teacher_candidate_identifier=None, term_descriptor=None, link=None, _configuration=None): # noqa: E501
"""TpdmTeacherCandidateAcademicRecordReference - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._education_organization_id = None
self._school_year = None
self._teacher_candidate_identifier = None
self._term_descriptor = None
self._link = None
self.discriminator = None
self.education_organization_id = education_organization_id
self.school_year = school_year
self.teacher_candidate_identifier = teacher_candidate_identifier
self.term_descriptor = term_descriptor
if link is not None:
self.link = link
@property
def education_organization_id(self):
"""Gets the education_organization_id of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
The identifier assigned to an education organization. # noqa: E501
:return: The education_organization_id of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:rtype: int
"""
return self._education_organization_id
@education_organization_id.setter
def education_organization_id(self, education_organization_id):
"""Sets the education_organization_id of this TpdmTeacherCandidateAcademicRecordReference.
The identifier assigned to an education organization. # noqa: E501
:param education_organization_id: The education_organization_id of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:type: int
"""
if self._configuration.client_side_validation and education_organization_id is None:
raise ValueError("Invalid value for `education_organization_id`, must not be `None`") # noqa: E501
self._education_organization_id = education_organization_id
@property
def school_year(self):
"""Gets the school_year of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
The identifier for the school year. # noqa: E501
:return: The school_year of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:rtype: int
"""
return self._school_year
@school_year.setter
def school_year(self, school_year):
"""Sets the school_year of this TpdmTeacherCandidateAcademicRecordReference.
The identifier for the school year. # noqa: E501
:param school_year: The school_year of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:type: int
"""
if self._configuration.client_side_validation and school_year is None:
raise ValueError("Invalid value for `school_year`, must not be `None`") # noqa: E501
self._school_year = school_year
@property
def teacher_candidate_identifier(self):
"""Gets the teacher_candidate_identifier of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
A unique alphanumeric code assigned to a teacher candidate. # noqa: E501
:return: The teacher_candidate_identifier of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:rtype: str
"""
return self._teacher_candidate_identifier
@teacher_candidate_identifier.setter
def teacher_candidate_identifier(self, teacher_candidate_identifier):
"""Sets the teacher_candidate_identifier of this TpdmTeacherCandidateAcademicRecordReference.
A unique alphanumeric code assigned to a teacher candidate. # noqa: E501
:param teacher_candidate_identifier: The teacher_candidate_identifier of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and teacher_candidate_identifier is None:
raise ValueError("Invalid value for `teacher_candidate_identifier`, must not be `None`") # noqa: E501
if (self._configuration.client_side_validation and
teacher_candidate_identifier is not None and len(teacher_candidate_identifier) > 32):
raise ValueError("Invalid value for `teacher_candidate_identifier`, length must be less than or equal to `32`") # noqa: E501
self._teacher_candidate_identifier = teacher_candidate_identifier
@property
def term_descriptor(self):
"""Gets the term_descriptor of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
The term for the session during the school year. # noqa: E501
:return: The term_descriptor of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:rtype: str
"""
return self._term_descriptor
@term_descriptor.setter
def term_descriptor(self, term_descriptor):
"""Sets the term_descriptor of this TpdmTeacherCandidateAcademicRecordReference.
The term for the session during the school year. # noqa: E501
:param term_descriptor: The term_descriptor of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and term_descriptor is None:
raise ValueError("Invalid value for `term_descriptor`, must not be `None`") # noqa: E501
if (self._configuration.client_side_validation and
term_descriptor is not None and len(term_descriptor) > 306):
raise ValueError("Invalid value for `term_descriptor`, length must be less than or equal to `306`") # noqa: E501
self._term_descriptor = term_descriptor
@property
def link(self):
"""Gets the link of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:return: The link of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:rtype: Link
"""
return self._link
@link.setter
def link(self, link):
"""Sets the link of this TpdmTeacherCandidateAcademicRecordReference.
:param link: The link of this TpdmTeacherCandidateAcademicRecordReference. # noqa: E501
:type: Link
"""
self._link = link
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TpdmTeacherCandidateAcademicRecordReference, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TpdmTeacherCandidateAcademicRecordReference):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TpdmTeacherCandidateAcademicRecordReference):
return True
return self.to_dict() != other.to_dict()
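# --- Illustrative usage sketch (not part of the generated client) ---
# A minimal, hedged example of how this reference model is typically used.
# The attribute values are made up; only the class defined above and its
# default Configuration are assumed.
if __name__ == '__main__':
    example_ref = TpdmTeacherCandidateAcademicRecordReference(
        education_organization_id=255901,
        school_year=2024,
        teacher_candidate_identifier='TC-0001',
        term_descriptor='Fall Semester')
    # to_dict() recursively serializes nested swagger models
    print(example_ref.to_dict())
    # __eq__/__ne__ compare the serialized dicts, so equal field values
    # mean equal objects
    print(example_ref == example_ref)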
| 39.642276
| 482
| 0.675759
|
import pprint
import re
import six
from swagger_client.configuration import Configuration
class TpdmTeacherCandidateAcademicRecordReference(object):
swagger_types = {
'education_organization_id': 'int',
'school_year': 'int',
'teacher_candidate_identifier': 'str',
'term_descriptor': 'str',
'link': 'Link'
}
attribute_map = {
'education_organization_id': 'educationOrganizationId',
'school_year': 'schoolYear',
'teacher_candidate_identifier': 'teacherCandidateIdentifier',
'term_descriptor': 'termDescriptor',
'link': 'link'
}
def __init__(self, education_organization_id=None, school_year=None, teacher_candidate_identifier=None, term_descriptor=None, link=None, _configuration=None):
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._education_organization_id = None
self._school_year = None
self._teacher_candidate_identifier = None
self._term_descriptor = None
self._link = None
self.discriminator = None
self.education_organization_id = education_organization_id
self.school_year = school_year
self.teacher_candidate_identifier = teacher_candidate_identifier
self.term_descriptor = term_descriptor
if link is not None:
self.link = link
@property
def education_organization_id(self):
return self._education_organization_id
@education_organization_id.setter
def education_organization_id(self, education_organization_id):
if self._configuration.client_side_validation and education_organization_id is None:
raise ValueError("Invalid value for `education_organization_id`, must not be `None`")
self._education_organization_id = education_organization_id
@property
def school_year(self):
return self._school_year
@school_year.setter
def school_year(self, school_year):
if self._configuration.client_side_validation and school_year is None:
raise ValueError("Invalid value for `school_year`, must not be `None`")
self._school_year = school_year
@property
def teacher_candidate_identifier(self):
return self._teacher_candidate_identifier
@teacher_candidate_identifier.setter
def teacher_candidate_identifier(self, teacher_candidate_identifier):
if self._configuration.client_side_validation and teacher_candidate_identifier is None:
raise ValueError("Invalid value for `teacher_candidate_identifier`, must not be `None`")
if (self._configuration.client_side_validation and
teacher_candidate_identifier is not None and len(teacher_candidate_identifier) > 32):
raise ValueError("Invalid value for `teacher_candidate_identifier`, length must be less than or equal to `32`")
self._teacher_candidate_identifier = teacher_candidate_identifier
@property
def term_descriptor(self):
return self._term_descriptor
@term_descriptor.setter
def term_descriptor(self, term_descriptor):
if self._configuration.client_side_validation and term_descriptor is None:
raise ValueError("Invalid value for `term_descriptor`, must not be `None`")
if (self._configuration.client_side_validation and
term_descriptor is not None and len(term_descriptor) > 306):
raise ValueError("Invalid value for `term_descriptor`, length must be less than or equal to `306`")
self._term_descriptor = term_descriptor
@property
def link(self):
return self._link
@link.setter
def link(self, link):
self._link = link
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TpdmTeacherCandidateAcademicRecordReference, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, TpdmTeacherCandidateAcademicRecordReference):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, TpdmTeacherCandidateAcademicRecordReference):
return True
return self.to_dict() != other.to_dict()
| true
| true
|
1c464912ecb97ea85dc0a43a1776142eb3f9360b
| 1,613
|
py
|
Python
|
sip/examples/flask_processing_controller/app/api/subarray_list.py
|
SKA-ScienceDataProcessor/integration-prototype
|
5875dc0489f707232534ce75daf3707f909bcd15
|
[
"BSD-3-Clause"
] | 3
|
2016-11-08T02:27:05.000Z
|
2018-01-22T13:26:11.000Z
|
sip/examples/flask_processing_controller/app/api/subarray_list.py
|
SKA-ScienceDataProcessor/integration-prototype
|
5875dc0489f707232534ce75daf3707f909bcd15
|
[
"BSD-3-Clause"
] | 87
|
2016-11-24T11:09:01.000Z
|
2021-03-25T22:23:59.000Z
|
sip/examples/flask_processing_controller/app/api/subarray_list.py
|
SKA-ScienceDataProcessor/integration-prototype
|
5875dc0489f707232534ce75daf3707f909bcd15
|
[
"BSD-3-Clause"
] | 10
|
2016-05-18T09:41:36.000Z
|
2019-07-04T10:19:24.000Z
|
# -*- coding: utf-8 -*-
"""Sub array route"""
import logging
from flask import Blueprint, request
from flask_api import status
from .utils import get_root_url, missing_db_response
from config_db import SchedulingBlockDbClient
BP = Blueprint('subarray_list:', __name__)
DB = SchedulingBlockDbClient()
LOG = logging.getLogger('SIP.EC.PCI')
@BP.route('/subarrays', methods=['GET'])
@missing_db_response
def get():
"""Subarray list.
This method will list all sub-arrays known to SDP.
"""
_url = get_root_url()
LOG.debug('GET Sub array list')
sub_array_ids = sorted(DB.get_sub_array_ids())
response = dict(sub_arrays=[])
for array_id in sub_array_ids:
        array_summary = dict(sub_array_id=array_id)
block_ids = DB.get_sub_array_sbi_ids(array_id)
LOG.debug('Subarray IDs: %s', array_id)
LOG.debug('SBI IDs: %s', block_ids)
array_summary['num_scheduling_blocks'] = len(block_ids)
array_summary['links'] = {
'detail': '{}/sub-array/{}'.format(_url, array_id)
}
response['sub_arrays'].append(array_summary)
response['links'] = dict(self=request.url, home=_url)
return response, status.HTTP_200_OK
@BP.route('/subarrays/schedule', methods=['POST'])
@missing_db_response
def post():
"""Generate a SBI."""
_url = get_root_url()
LOG.debug("POST subarray SBI.")
# TODO(BM) generate sbi_config .. see report ...
# ... will need to add this as a util function on the db...
sbi_config = {}
DB.add_sbi(sbi_config)
response = dict()
return response, status.HTTP_200_OK
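# --- Illustrative wiring sketch (not part of the original module) ---
# A minimal example of how this blueprint could be mounted on a Flask-API app.
# The app factory and URL prefix below are assumptions for illustration only.
def create_app():
    """Create a Flask-API app exposing the subarray routes."""
    from flask_api import FlaskAPI
    app = FlaskAPI(__name__)
    # Serves GET /api/v1/subarrays and POST /api/v1/subarrays/schedule
    app.register_blueprint(BP, url_prefix='/api/v1')
    return app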
| 27.810345
| 63
| 0.67018
|
import logging
from flask import Blueprint, request
from flask_api import status
from .utils import get_root_url, missing_db_response
from config_db import SchedulingBlockDbClient
BP = Blueprint('subarray_list:', __name__)
DB = SchedulingBlockDbClient()
LOG = logging.getLogger('SIP.EC.PCI')
@BP.route('/subarrays', methods=['GET'])
@missing_db_response
def get():
_url = get_root_url()
LOG.debug('GET Sub array list')
sub_array_ids = sorted(DB.get_sub_array_ids())
response = dict(sub_arrays=[])
for array_id in sub_array_ids:
        array_summary = dict(sub_array_id=array_id)
block_ids = DB.get_sub_array_sbi_ids(array_id)
LOG.debug('Subarray IDs: %s', array_id)
LOG.debug('SBI IDs: %s', block_ids)
array_summary['num_scheduling_blocks'] = len(block_ids)
array_summary['links'] = {
'detail': '{}/sub-array/{}'.format(_url, array_id)
}
response['sub_arrays'].append(array_summary)
response['links'] = dict(self=request.url, home=_url)
return response, status.HTTP_200_OK
@BP.route('/subarrays/schedule', methods=['POST'])
@missing_db_response
def post():
_url = get_root_url()
LOG.debug("POST subarray SBI.")
sbi_config = {}
DB.add_sbi(sbi_config)
response = dict()
return response, status.HTTP_200_OK
| true
| true
|
1c46491789a2b206ec7a467f93eaa6eeb029b3c1
| 4,899
|
py
|
Python
|
train_face_recognition.py
|
JustinWingChungHui/okkindred_facial_recognition
|
e6744e604d0bf25f9024a2ef2ba7ca9d0760c8b1
|
[
"MIT"
] | null | null | null |
train_face_recognition.py
|
JustinWingChungHui/okkindred_facial_recognition
|
e6744e604d0bf25f9024a2ef2ba7ca9d0760c8b1
|
[
"MIT"
] | 5
|
2019-10-21T20:33:13.000Z
|
2022-03-12T00:00:19.000Z
|
train_face_recognition.py
|
JustinWingChungHui/okkindred_facial_recognition
|
e6744e604d0bf25f9024a2ef2ba7ca9d0760c8b1
|
[
"MIT"
] | null | null | null |
# https://github.com/ageitgey/face_recognition/blob/master/examples/face_recognition_knn.py
import math
import os
import pickle
from PIL import Image as PilImage
from sklearn import neighbors
from models import Person, Image, Tag, FaceModel
from secrets import TRAIN_FACE_RECOGNITION_TEMP_DIR
from file_downloader import download_file, clear_directory
import face_recognition
def get_file_for_tag(tag, image, session, dir_name):
'''
Gets file for tag and image
'''
print(' = Processing Tag and Image =')
print(' tag.id: {}'.format(tag.id))
print(' image.id: {}'.format(image.id))
file = download_file(dir_name, image.large_thumbnail)
print(' Opening Image')
original = PilImage.open(file)
print(' Cropping image')
left = tag.x1 * image.large_thumbnail_width
right = tag.x2 * image.large_thumbnail_width
top = tag.y1 * image.large_thumbnail_height
bottom = tag.y2 * image.large_thumbnail_height
cropped = original.crop((left, top, right, bottom))
cropped.save(file)
return file
def process_person(person, session, X, y):
'''
Processes images for one person
'''
print(' == Processing person name: {0} id: {1} =='.format(person.name, person.id))
dir_name = os.path.join(TRAIN_FACE_RECOGNITION_TEMP_DIR, str(person.id))
print(' Creating directory {}'.format(dir_name))
os.mkdir(dir_name)
files = []
if person.large_thumbnail:
        print('    Getting profile photo')
files.append(download_file(dir_name, person.large_thumbnail))
print(' Get all face detected tags for person')
tags_and_images = session.query(Tag, Image). \
filter(Tag.person_id == person.id). \
filter(Tag.face_detected == True). \
filter(Tag.image_id == Image.id).all()
print(' Total number of tags: {}'.format(len(tags_and_images)))
for tag, image in tags_and_images:
files.append(get_file_for_tag(tag, image, session, dir_name))
print(' Process Images')
for file in files:
process_file(file, X, y, person.id)
def process_file(file, X, y, person_id):
print(' Creating face encoding for {}'.format(file))
im = face_recognition.load_image_file(file)
face_bounding_boxes = face_recognition.face_locations(im)
# Add face encoding for current image to the training set
if len(face_bounding_boxes) == 1:
print(' Adding face to model')
X.append(face_recognition.face_encodings(im, known_face_locations=face_bounding_boxes)[0])
y.append(person_id)
else:
print(' XXX No Face Found!!! XXX')
def process_family(family_id, session):
'''
Creates a K Nearest neighbour model for a family
'''
print('')
print('===== Processing Family_id: {} ====='.format(family_id))
print('Clearing working directory')
clear_directory(TRAIN_FACE_RECOGNITION_TEMP_DIR)
face_model = FaceModel(family_id = family_id)
print('Get all people for family')
people = session.query(Person).filter(Person.family_id == family_id).all()
print('Total number of people: {}'.format(len(people)))
X = []
y = []
for person in people:
process_person(person, session, X, y)
if (len(X) > 0):
n_neighbors = int(round(math.sqrt(len(X))))
print('Setting n_neighbors to {}'.format(n_neighbors))
print('Creating and training the KNN classifier')
knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm='ball_tree', weights='distance')
knn_clf.fit(X, y)
print('y:')
print(y)
print('Pickling and saving to db')
face_model.fit_data_faces = pickle.dumps(X)
face_model.fit_data_person_ids = pickle.dumps(y)
face_model.n_neighbors = n_neighbors
face_model.trained_knn_model = pickle.dumps(knn_clf)
session.add(face_model)
session.commit()
else:
print('Not enough data to create model')
#print('#############################################')
#print('')
#print('Connecting to db')
# mysql+mysqldb://<user>:<password>@<host>/<dbname>
#connection_string = 'mysql+mysqldb://{0}:{1}@{2}/{3}'.format(DATABASE['USER'],
# DATABASE['PASSWORD'],
# DATABASE['HOST'],
# DATABASE['NAME'])
#engine = create_engine(connection_string)
#Base.metadata.bind = engine
#DBSession = sessionmaker()
#DBSession.bind = engine
#session = DBSession()
#print('Get all families')
#families = session.query(Family).all()
#print('Total number of families: {}'.format(len(families)))
#for family in families:
# process_family(family.id, session)
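# --- Illustrative prediction sketch (not part of the original script) ---
# Shows how a model trained by process_family() could be loaded back and used.
# It relies only on pickle, face_recognition and the FaceModel columns
# populated above; retrieving the FaceModel row from the database is left
# to the caller.
def predict_person_ids(image_path, face_model):
    '''Return the predicted person id for each face found in image_path.'''
    knn_clf = pickle.loads(face_model.trained_knn_model)
    im = face_recognition.load_image_file(image_path)
    face_bounding_boxes = face_recognition.face_locations(im)
    if not face_bounding_boxes:
        return []
    encodings = face_recognition.face_encodings(
        im, known_face_locations=face_bounding_boxes)
    return list(knn_clf.predict(encodings))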
| 31.203822
| 116
| 0.633395
|
import math
import os
import pickle
from PIL import Image as PilImage
from sklearn import neighbors
from models import Person, Image, Tag, FaceModel
from secrets import TRAIN_FACE_RECOGNITION_TEMP_DIR
from file_downloader import download_file, clear_directory
import face_recognition
def get_file_for_tag(tag, image, session, dir_name):
print(' = Processing Tag and Image =')
print(' tag.id: {}'.format(tag.id))
print(' image.id: {}'.format(image.id))
file = download_file(dir_name, image.large_thumbnail)
print(' Opening Image')
original = PilImage.open(file)
print(' Cropping image')
left = tag.x1 * image.large_thumbnail_width
right = tag.x2 * image.large_thumbnail_width
top = tag.y1 * image.large_thumbnail_height
bottom = tag.y2 * image.large_thumbnail_height
cropped = original.crop((left, top, right, bottom))
cropped.save(file)
return file
def process_person(person, session, X, y):
print(' == Processing person name: {0} id: {1} =='.format(person.name, person.id))
dir_name = os.path.join(TRAIN_FACE_RECOGNITION_TEMP_DIR, str(person.id))
print(' Creating directory {}'.format(dir_name))
os.mkdir(dir_name)
files = []
if person.large_thumbnail:
        print('    Getting profile photo')
files.append(download_file(dir_name, person.large_thumbnail))
print(' Get all face detected tags for person')
tags_and_images = session.query(Tag, Image). \
filter(Tag.person_id == person.id). \
filter(Tag.face_detected == True). \
filter(Tag.image_id == Image.id).all()
print(' Total number of tags: {}'.format(len(tags_and_images)))
for tag, image in tags_and_images:
files.append(get_file_for_tag(tag, image, session, dir_name))
print(' Process Images')
for file in files:
process_file(file, X, y, person.id)
def process_file(file, X, y, person_id):
print(' Creating face encoding for {}'.format(file))
im = face_recognition.load_image_file(file)
face_bounding_boxes = face_recognition.face_locations(im)
if len(face_bounding_boxes) == 1:
print(' Adding face to model')
X.append(face_recognition.face_encodings(im, known_face_locations=face_bounding_boxes)[0])
y.append(person_id)
else:
print(' XXX No Face Found!!! XXX')
def process_family(family_id, session):
print('')
print('===== Processing Family_id: {} ====='.format(family_id))
print('Clearing working directory')
clear_directory(TRAIN_FACE_RECOGNITION_TEMP_DIR)
face_model = FaceModel(family_id = family_id)
print('Get all people for family')
people = session.query(Person).filter(Person.family_id == family_id).all()
print('Total number of people: {}'.format(len(people)))
X = []
y = []
for person in people:
process_person(person, session, X, y)
if (len(X) > 0):
n_neighbors = int(round(math.sqrt(len(X))))
print('Setting n_neighbors to {}'.format(n_neighbors))
print('Creating and training the KNN classifier')
knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm='ball_tree', weights='distance')
knn_clf.fit(X, y)
print('y:')
print(y)
print('Pickling and saving to db')
face_model.fit_data_faces = pickle.dumps(X)
face_model.fit_data_person_ids = pickle.dumps(y)
face_model.n_neighbors = n_neighbors
face_model.trained_knn_model = pickle.dumps(knn_clf)
session.add(face_model)
session.commit()
else:
print('Not enough data to create model')
| true
| true
|
1c4649bc75a615ae3b5e27abb7216ac014db4166
| 36,743
|
py
|
Python
|
sklearn_extensions/model_selection/_search.py
|
ruppinlab/tcga-microbiome-prediction
|
e7923b94738f9bd1b7862bb109002554430d9ace
|
[
"BSD-3-Clause"
] | 3
|
2022-01-11T08:40:37.000Z
|
2022-01-28T08:00:39.000Z
|
sklearn_extensions/model_selection/_search.py
|
ruppinlab/tcga-microbiome-prediction
|
e7923b94738f9bd1b7862bb109002554430d9ace
|
[
"BSD-3-Clause"
] | null | null | null |
sklearn_extensions/model_selection/_search.py
|
ruppinlab/tcga-microbiome-prediction
|
e7923b94738f9bd1b7862bb109002554430d9ace
|
[
"BSD-3-Clause"
] | 1
|
2022-01-11T08:44:08.000Z
|
2022-01-11T08:44:08.000Z
|
"""
The :mod:`sklearn_extensions.model_selection._search` includes utilities to
fine-tune the parameters of an estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# Raghav RV <rvraghav93@gmail.com>
# Leandro Cruz Hermida <hermidal@cs.umd.edu>
# License: BSD 3 clause
from abc import abstractmethod
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from itertools import product
import numbers
import time
import warnings
import numpy as np
from joblib import Parallel, delayed
from scipy.stats import rankdata
from sklearn.base import is_classifier, clone
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import GridSearchCV, ParameterGrid
from sklearn.model_selection._search import BaseSearchCV
from sklearn.model_selection._split import check_cv
from sklearn.model_selection._validation import _aggregate_score_dicts
from sklearn.utils.fixes import MaskedArray
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.utils.validation import (indexable, check_is_fitted,
_check_fit_params)
from ..metrics._scorer import _check_multimetric_scoring
from ..utils.metaestimators import check_routing
from ._validation import _fit_and_score
__all__ = ['ExtendedGridSearchCV']
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 2:
raise ValueError("Parameter array should be one- or "
"two-dimensional.")
if (isinstance(v, str) or
not isinstance(v, (np.ndarray, Sequence))):
raise ValueError("Parameter values for parameter ({0}) need "
"to be a sequence(but not a string) or"
" np.ndarray.".format(name))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
class ExtendedBaseSearchCV(BaseSearchCV):
"""Abstract base class for hyper parameter search with cross-validation.
"""
@abstractmethod
def __init__(self, estimator, scoring=None, n_jobs=None, iid='deprecated',
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score=np.nan, return_train_score=True,
param_routing=None):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
self.return_train_score = return_train_score
self.param_routing = param_routing
self.router = check_routing(
self.param_routing, ['estimator', 'cv', 'scoring'],
{'cv': {'groups': 'groups', 'weights': 'group_weights'},
'estimator': ['-groups', '-group_weights']})
@property
def _estimator_type(self):
return self.estimator._estimator_type
@property
def _pairwise(self):
# allows cross-validation to see 'precomputed' metrics
return getattr(self.estimator, '_pairwise', False)
def set_params(self, **params):
super().set_params(**params)
if 'param_routing' in params:
self.router = check_routing(
self.param_routing, ['estimator', 'cv', 'scoring'],
{'cv': {'groups': 'groups', 'weights': 'group_weights'},
'estimator': ['-groups', '-group_weights']})
return self
def score(self, X, y=None, **score_params):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples, n_output) or (n_samples,), optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
"""
self._check_is_fitted('score')
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
score = self.scorer_[self.refit] if self.multimetric_ else self.scorer_
return score(self.best_estimator_, X, y, **score_params)
def _check_is_fitted(self, method_name):
if not self.refit:
raise NotFittedError('This %s instance was initialized '
'with refit=False. %s is '
'available only after refitting on the best '
'parameters. You can refit an estimator '
'manually using the ``best_params_`` '
'attribute'
% (type(self).__name__, method_name))
else:
check_is_fitted(self)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X, **predict_params):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict')
return self.best_estimator_.predict(X, **predict_params)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X, **predict_params):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_proba')
return self.best_estimator_.predict_proba(X, **predict_params)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X, **predict_params):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_log_proba')
return self.best_estimator_.predict_log_proba(X, **predict_params)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X, **predict_params):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('decision_function')
return self.best_estimator_.decision_function(X, **predict_params)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X, **transform_params):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('transform')
return self.best_estimator_.transform(X, **transform_params)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt, **transform_params):
"""Call inverse_transform on the estimator with the best found params.
Only available if the underlying estimator implements
``inverse_transform`` and ``refit=True``.
Parameters
----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('inverse_transform')
return self.best_estimator_.inverse_transform(Xt, **transform_params)
@property
def classes_(self):
self._check_is_fitted("classes_")
return self.best_estimator_.classes_
def _run_search(self, evaluate_candidates):
"""Repeatedly calls `evaluate_candidates` to conduct a search.
This method, implemented in sub-classes, makes it possible to
        customize the scheduling of evaluations: GridSearchCV and
        RandomizedSearchCV schedule evaluations for their whole parameter
        search space at once, but other, more sequential approaches are also
        possible: for instance, it is possible to iteratively schedule evaluations
for new regions of the parameter search space based on previously
collected evaluation results. This makes it possible to implement
Bayesian optimization or more generally sequential model-based
optimization by deriving from the BaseSearchCV abstract base class.
Parameters
----------
evaluate_candidates : callable
This callback accepts a list of candidates, where each candidate is
a dict of parameter settings. It returns a dict of all results so
far, formatted like ``cv_results_``.
Examples
--------
::
def _run_search(self, evaluate_candidates):
'Try C=0.1 only if C=1 is better than C=10'
all_results = evaluate_candidates([{'C': 1}, {'C': 10}])
score = all_results['mean_test_score']
if score[0] < score[1]:
evaluate_candidates([{'C': 0.1}])
"""
raise NotImplementedError("_run_search not implemented.")
def fit(self, X, y=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples, n_output) or (n_samples,), optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
"""
estimator = self.estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
scorers, self.multimetric_ = _check_multimetric_scoring(
self.estimator, scoring=self.scoring)
if self.multimetric_:
if self.refit is not False and (
not isinstance(self.refit, str) or
# This will work for both dict / list (tuple)
self.refit not in scorers) and not callable(self.refit):
raise ValueError("For multi-metric scoring, the parameter "
"refit must be set to a scorer key or a "
"callable to refit an estimator with the "
"best parameter setting on the whole "
"data and make the best_* attributes "
"available for that metric. If this is "
"not needed, refit should be set to "
"False explicitly. %r was passed."
% self.refit)
else:
refit_metric = self.refit
else:
refit_metric = 'score'
# so feature metadata/properties can work
feature_params = {k: v for k, v in fit_params.items()
if k == 'feature_meta'}
fit_params = {k: v for k, v in fit_params.items()
if k != 'feature_meta'}
X, y, *fit_params_values = indexable(X, y, *fit_params.values())
fit_params = dict(zip(fit_params.keys(), fit_params_values))
fit_params = _check_fit_params(X, fit_params)
(fit_params, cv_params, score_params), remainder = (
self.router(fit_params))
if remainder:
raise TypeError('fit() got unexpected keyword arguments %r'
% sorted(remainder))
n_splits = cv.get_n_splits(X, y, **cv_params)
base_estimator = clone(self.estimator)
parallel = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch)
fit_and_score_kwargs = dict(scorer=scorers,
fit_params=fit_params,
score_params=score_params,
feature_params=feature_params,
return_train_score=self.return_train_score,
return_n_test_samples=True,
return_times=True,
return_parameters=False,
error_score=self.error_score,
verbose=self.verbose)
results = {}
with parallel:
all_candidate_params = []
all_out = []
def evaluate_candidates(candidate_params):
candidate_params = list(candidate_params)
n_candidates = len(candidate_params)
if self.verbose > 0:
print("Fitting {0} folds for each of {1} candidates,"
" totalling {2} fits".format(
n_splits, n_candidates, n_candidates * n_splits))
out = parallel(delayed(_fit_and_score)(clone(base_estimator),
X, y,
train=train, test=test,
parameters=parameters,
**fit_and_score_kwargs)
for parameters, (train, test)
in product(candidate_params,
cv.split(X, y, **cv_params)))
if len(out) < 1:
raise ValueError('No fits were performed. '
'Was the CV iterator empty? '
'Were there no candidates?')
elif len(out) != n_candidates * n_splits:
raise ValueError('cv.split and cv.get_n_splits returned '
'inconsistent results. Expected {} '
'splits, got {}'
.format(n_splits,
len(out) // n_candidates))
all_candidate_params.extend(candidate_params)
all_out.extend(out)
nonlocal results
results = self._format_results(
all_candidate_params, scorers, n_splits, all_out)
return results
self._run_search(evaluate_candidates)
# For multi-metric evaluation, store the best_index_, best_params_ and
# best_score_ iff refit is one of the scorer names
# In single metric evaluation, refit_metric is "score"
if self.refit or not self.multimetric_:
# If callable, refit is expected to return the index of the best
# parameter set.
if callable(self.refit):
self.best_index_ = self.refit(results)
if not isinstance(self.best_index_, numbers.Integral):
raise TypeError('best_index_ returned is not an integer')
if (self.best_index_ < 0 or
self.best_index_ >= len(results["params"])):
raise IndexError('best_index_ index out of range')
else:
self.best_index_ = results["rank_test_%s"
% refit_metric].argmin()
self.best_score_ = results["mean_test_%s" % refit_metric][
self.best_index_]
self.best_params_ = results["params"][self.best_index_]
if self.refit:
# we clone again after setting params in case some
# of the params are estimators as well.
self.best_estimator_ = clone(clone(base_estimator).set_params(
**self.best_params_))
refit_start_time = time.time()
if y is not None:
self.best_estimator_.fit(X, y, **fit_params, **feature_params)
else:
self.best_estimator_.fit(X, **fit_params, **feature_params)
refit_end_time = time.time()
self.refit_time_ = refit_end_time - refit_start_time
# Store the only scorer not as a dict for single metric evaluation
self.scorer_ = scorers if self.multimetric_ else scorers['score']
self.cv_results_ = results
self.n_splits_ = n_splits
return self
def _format_results(self, candidate_params, scorers, n_splits, out):
n_candidates = len(candidate_params)
        # if one chooses to see the train score, "out" will contain train score info
if self.return_train_score:
(train_score_dicts, test_score_dicts, test_sample_counts, fit_time,
score_time) = zip(*out)
else:
(test_score_dicts, test_sample_counts, fit_time,
score_time) = zip(*out)
        # test_score_dicts and train_score_dicts are lists of dictionaries;
        # we turn them into a dict of lists
test_scores = _aggregate_score_dicts(test_score_dicts)
if self.return_train_score:
train_scores = _aggregate_score_dicts(train_score_dicts)
results = {}
def _store(key_name, array, weights=None, splits=False, rank=False):
"""A small helper to store the scores/times to the cv_results_"""
# When iterated first by splits, then by parameters
# We want `array` to have `n_candidates` rows and `n_splits` cols.
array = np.array(array, dtype=np.float64).reshape(n_candidates,
n_splits)
if splits:
for split_i in range(n_splits):
# Uses closure to alter the results
results["split%d_%s"
% (split_i, key_name)] = array[:, split_i]
array_means = np.average(array, axis=1, weights=weights)
results['mean_%s' % key_name] = array_means
# Weighted std is not directly available in numpy
array_stds = np.sqrt(np.average((array -
array_means[:, np.newaxis]) ** 2,
axis=1, weights=weights))
results['std_%s' % key_name] = array_stds
if rank:
results["rank_%s" % key_name] = np.asarray(
rankdata(-array_means, method='min'), dtype=np.int32)
_store('fit_time', fit_time)
_store('score_time', score_time)
# Use one MaskedArray and mask all the places where the param is not
# applicable for that candidate. Use defaultdict as each candidate may
# not contain all the params
param_results = defaultdict(partial(MaskedArray,
np.empty(n_candidates,),
mask=True,
dtype=object))
for cand_i, params in enumerate(candidate_params):
for name, value in params.items():
# An all masked empty array gets created for the key
# `"param_%s" % name` at the first occurrence of `name`.
# Setting the value at an index also unmasks that index
param_results["param_%s" % name][cand_i] = value
results.update(param_results)
# Store a list of param dicts at the key 'params'
results['params'] = candidate_params
# NOTE test_sample counts (weights) remain the same for all candidates
test_sample_counts = np.array(test_sample_counts[:n_splits],
dtype=np.int)
if self.iid != 'deprecated':
warnings.warn(
"The parameter 'iid' is deprecated in 0.22 and will be "
"removed in 0.24.", FutureWarning
)
iid = self.iid
else:
iid = False
for scorer_name in scorers.keys():
            # Compute the (weighted) mean and std for test scores alone
_store('test_%s' % scorer_name, test_scores[scorer_name],
splits=True, rank=True,
weights=test_sample_counts if iid else None)
if self.return_train_score:
_store('train_%s' % scorer_name, train_scores[scorer_name],
splits=True)
return results
class ExtendedGridSearchCV(ExtendedBaseSearchCV, GridSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable, list/tuple, dict or None, default: None
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique) strings
or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a single
value. Metric functions returning a list/array of values can be wrapped
into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
If None, the estimator's score method is used.
n_jobs : int or None, optional (default=None)
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=False
If True, return the average score across folds, weighted by the number
of samples in each test set. In this case, the data is assumed to be
identically distributed across the folds, and the loss minimized is
the total loss per sample, and not the mean loss across the folds.
.. deprecated:: 0.22
Parameter ``iid`` is deprecated in 0.22 and will be removed in 0.24
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
refit : boolean, string, or callable, default=True
Refit an estimator using the best found parameters on the whole
dataset.
For multiple metric evaluation, this needs to be a string denoting the
scorer that would be used to find the best parameters for refitting
the estimator at the end.
Where there are considerations other than maximum score in
choosing a best estimator, ``refit`` can be set to a function which
returns the selected ``best_index_`` given ``cv_results_``. In that
        case, the ``best_estimator_`` and ``best_params_`` will be set
according to the returned ``best_index_`` while the ``best_score_``
attribute will not be available.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``GridSearchCV`` instance.
Also for multiple metric evaluation, the attributes ``best_index_``,
``best_score_`` and ``best_params_`` will only be available if
``refit`` is set and all of them will be determined w.r.t this specific
scorer.
See ``scoring`` parameter to know more about multiple metric
evaluation.
.. versionchanged:: 0.20
Support for callable added.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error. Default is ``np.nan``.
return_train_score : boolean, default=False
If ``False``, the ``cv_results_`` attribute will not include training
scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
Examples
--------
>>> from sklearn import svm, datasets
>>> from sklearn.model_selection import GridSearchCV
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svc = svm.SVC()
>>> clf = GridSearchCV(svc, parameters)
>>> clf.fit(iris.data, iris.target)
GridSearchCV(estimator=SVC(),
param_grid={'C': [1, 10], 'kernel': ('linear', 'rbf')})
>>> sorted(clf.cv_results_.keys())
['mean_fit_time', 'mean_score_time', 'mean_test_score',...
'param_C', 'param_kernel', 'params',...
'rank_test_score', 'split0_test_score',...
'split2_test_score', ...
'std_fit_time', 'std_score_time', 'std_test_score']
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance the below given table
+------------+-----------+------------+-----------------+---+---------+
|param_kernel|param_gamma|param_degree|split0_test_score|...|rank_t...|
+============+===========+============+=================+===+=========+
| 'poly' | -- | 2 | 0.80 |...| 2 |
+------------+-----------+------------+-----------------+---+---------+
| 'poly' | -- | 3 | 0.70 |...| 4 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.1 | -- | 0.80 |...| 3 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.2 | -- | 0.93 |...| 1 |
+------------+-----------+------------+-----------------+---+---------+
will be represented by a ``cv_results_`` dict of::
{
'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
mask = [False False False False]...)
'param_gamma': masked_array(data = [-- -- 0.1 0.2],
mask = [ True True False False]...),
'param_degree': masked_array(data = [2.0 3.0 -- --],
mask = [False False True True]...),
'split0_test_score' : [0.80, 0.70, 0.80, 0.93],
'split1_test_score' : [0.82, 0.50, 0.70, 0.78],
'mean_test_score' : [0.81, 0.60, 0.75, 0.85],
'std_test_score' : [0.01, 0.10, 0.05, 0.08],
'rank_test_score' : [2, 4, 3, 1],
'split0_train_score' : [0.80, 0.92, 0.70, 0.93],
'split1_train_score' : [0.82, 0.55, 0.70, 0.87],
'mean_train_score' : [0.81, 0.74, 0.70, 0.90],
'std_train_score' : [0.01, 0.19, 0.00, 0.03],
'mean_fit_time' : [0.73, 0.63, 0.43, 0.49],
'std_fit_time' : [0.01, 0.02, 0.01, 0.01],
'mean_score_time' : [0.01, 0.06, 0.04, 0.04],
'std_score_time' : [0.00, 0.00, 0.00, 0.01],
'params' : [{'kernel': 'poly', 'degree': 2}, ...],
}
NOTE
The key ``'params'`` is used to store a list of parameter
settings dicts for all the parameter candidates.
The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
``std_score_time`` are all in seconds.
For multi-metric evaluation, the scores for all the scorers are
available in the ``cv_results_`` dict at the keys ending with that
scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
above. ('split0_test_precision', 'mean_train_precision' etc.)
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
See ``refit`` parameter for more information on allowed values.
best_score_ : float
Mean cross-validated score of the best_estimator
For multi-metric evaluation, this is present only if ``refit`` is
specified.
This attribute is not available if ``refit`` is a function.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
For multi-metric evaluation, this is present only if ``refit`` is
specified.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
For multi-metric evaluation, this is present only if ``refit`` is
specified.
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
For multi-metric evaluation, this attribute holds the validated
``scoring`` dict which maps the scorer key to the scorer callable.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
refit_time_ : float
Seconds used for refitting the best model on the whole dataset.
This is present only if ``refit`` is not False.
Notes
-----
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.model_selection.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
_required_parameters = ["estimator", "param_grid"]
def __init__(self, estimator, param_grid, scoring=None, n_jobs=None,
iid='deprecated', refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score=np.nan,
return_train_score=False, param_routing=None):
super().__init__(
estimator=estimator, scoring=scoring,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score,
return_train_score=return_train_score, param_routing=param_routing)
self.param_grid = param_grid
_check_param_grid(param_grid)
def _run_search(self, evaluate_candidates):
"""Search all candidates in param_grid"""
evaluate_candidates(ParameterGrid(self.param_grid))
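# --- Illustrative usage sketch (not part of the original module) ---
# Mirrors the plain GridSearchCV example from the class docstring; the iris
# data and SVC parameters are illustrative only. The commented lines hint at
# the extension this class adds: with the default param routing, a ``groups``
# keyword passed to fit() is forwarded to the CV splitter and withheld from
# the estimator (a sketch only, not verified here).
if __name__ == '__main__':
    from sklearn import svm, datasets
    iris = datasets.load_iris()
    parameters = {'kernel': ('linear', 'rbf'), 'C': [1, 10]}
    search = ExtendedGridSearchCV(svm.SVC(), parameters)
    search.fit(iris.data, iris.target)
    print(search.best_params_)
    # from sklearn.model_selection import GroupKFold
    # search = ExtendedGridSearchCV(svm.SVC(), {'C': [1, 10]}, cv=GroupKFold(3))
    # search.fit(X, y, groups=groups)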
| 42.924065
| 82
| 0.590398
|
from abc import abstractmethod
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from itertools import product
import numbers
import time
import warnings
import numpy as np
from joblib import Parallel, delayed
from scipy.stats import rankdata
from sklearn.base import is_classifier, clone
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import GridSearchCV, ParameterGrid
from sklearn.model_selection._search import BaseSearchCV
from sklearn.model_selection._split import check_cv
from sklearn.model_selection._validation import _aggregate_score_dicts
from sklearn.utils.fixes import MaskedArray
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.utils.validation import (indexable, check_is_fitted,
_check_fit_params)
from ..metrics._scorer import _check_multimetric_scoring
from ..utils.metaestimators import check_routing
from ._validation import _fit_and_score
__all__ = ['ExtendedGridSearchCV']
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 2:
raise ValueError("Parameter array should be one- or "
"two-dimensional.")
if (isinstance(v, str) or
not isinstance(v, (np.ndarray, Sequence))):
raise ValueError("Parameter values for parameter ({0}) need "
"to be a sequence(but not a string) or"
" np.ndarray.".format(name))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
class ExtendedBaseSearchCV(BaseSearchCV):
@abstractmethod
def __init__(self, estimator, scoring=None, n_jobs=None, iid='deprecated',
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score=np.nan, return_train_score=True,
param_routing=None):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
self.return_train_score = return_train_score
self.param_routing = param_routing
self.router = check_routing(
self.param_routing, ['estimator', 'cv', 'scoring'],
{'cv': {'groups': 'groups', 'weights': 'group_weights'},
'estimator': ['-groups', '-group_weights']})
@property
def _estimator_type(self):
return self.estimator._estimator_type
@property
def _pairwise(self):
return getattr(self.estimator, '_pairwise', False)
def set_params(self, **params):
super().set_params(**params)
if 'param_routing' in params:
self.router = check_routing(
self.param_routing, ['estimator', 'cv', 'scoring'],
{'cv': {'groups': 'groups', 'weights': 'group_weights'},
'estimator': ['-groups', '-group_weights']})
return self
def score(self, X, y=None, **score_params):
self._check_is_fitted('score')
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
score = self.scorer_[self.refit] if self.multimetric_ else self.scorer_
return score(self.best_estimator_, X, y, **score_params)
def _check_is_fitted(self, method_name):
if not self.refit:
raise NotFittedError('This %s instance was initialized '
'with refit=False. %s is '
'available only after refitting on the best '
'parameters. You can refit an estimator '
'manually using the ``best_params_`` '
'attribute'
% (type(self).__name__, method_name))
else:
check_is_fitted(self)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X, **predict_params):
self._check_is_fitted('predict')
return self.best_estimator_.predict(X, **predict_params)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X, **predict_params):
self._check_is_fitted('predict_proba')
return self.best_estimator_.predict_proba(X, **predict_params)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X, **predict_params):
self._check_is_fitted('predict_log_proba')
return self.best_estimator_.predict_log_proba(X, **predict_params)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X, **predict_params):
self._check_is_fitted('decision_function')
return self.best_estimator_.decision_function(X, **predict_params)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X, **transform_params):
self._check_is_fitted('transform')
return self.best_estimator_.transform(X, **transform_params)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt, **transform_params):
self._check_is_fitted('inverse_transform')
return self.best_estimator_.inverse_transform(Xt, **transform_params)
@property
def classes_(self):
self._check_is_fitted("classes_")
return self.best_estimator_.classes_
def _run_search(self, evaluate_candidates):
raise NotImplementedError("_run_search not implemented.")
def fit(self, X, y=None, **fit_params):
estimator = self.estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
scorers, self.multimetric_ = _check_multimetric_scoring(
self.estimator, scoring=self.scoring)
if self.multimetric_:
if self.refit is not False and (
not isinstance(self.refit, str) or
# This will work for both dict / list (tuple)
self.refit not in scorers) and not callable(self.refit):
raise ValueError("For multi-metric scoring, the parameter "
"refit must be set to a scorer key or a "
"callable to refit an estimator with the "
"best parameter setting on the whole "
"data and make the best_* attributes "
"available for that metric. If this is "
"not needed, refit should be set to "
"False explicitly. %r was passed."
% self.refit)
else:
refit_metric = self.refit
else:
refit_metric = 'score'
# so feature metadata/properties can work
feature_params = {k: v for k, v in fit_params.items()
if k == 'feature_meta'}
fit_params = {k: v for k, v in fit_params.items()
if k != 'feature_meta'}
X, y, *fit_params_values = indexable(X, y, *fit_params.values())
fit_params = dict(zip(fit_params.keys(), fit_params_values))
fit_params = _check_fit_params(X, fit_params)
(fit_params, cv_params, score_params), remainder = (
self.router(fit_params))
if remainder:
raise TypeError('fit() got unexpected keyword arguments %r'
% sorted(remainder))
n_splits = cv.get_n_splits(X, y, **cv_params)
base_estimator = clone(self.estimator)
parallel = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch)
fit_and_score_kwargs = dict(scorer=scorers,
fit_params=fit_params,
score_params=score_params,
feature_params=feature_params,
return_train_score=self.return_train_score,
return_n_test_samples=True,
return_times=True,
return_parameters=False,
error_score=self.error_score,
verbose=self.verbose)
results = {}
with parallel:
all_candidate_params = []
all_out = []
def evaluate_candidates(candidate_params):
candidate_params = list(candidate_params)
n_candidates = len(candidate_params)
if self.verbose > 0:
print("Fitting {0} folds for each of {1} candidates,"
" totalling {2} fits".format(
n_splits, n_candidates, n_candidates * n_splits))
out = parallel(delayed(_fit_and_score)(clone(base_estimator),
X, y,
train=train, test=test,
parameters=parameters,
**fit_and_score_kwargs)
for parameters, (train, test)
in product(candidate_params,
cv.split(X, y, **cv_params)))
if len(out) < 1:
raise ValueError('No fits were performed. '
'Was the CV iterator empty? '
'Were there no candidates?')
elif len(out) != n_candidates * n_splits:
raise ValueError('cv.split and cv.get_n_splits returned '
'inconsistent results. Expected {} '
'splits, got {}'
.format(n_splits,
len(out) // n_candidates))
all_candidate_params.extend(candidate_params)
all_out.extend(out)
nonlocal results
results = self._format_results(
all_candidate_params, scorers, n_splits, all_out)
return results
self._run_search(evaluate_candidates)
# For multi-metric evaluation, store the best_index_, best_params_ and
# best_score_ iff refit is one of the scorer names
# In single metric evaluation, refit_metric is "score"
if self.refit or not self.multimetric_:
# If callable, refit is expected to return the index of the best
# parameter set.
if callable(self.refit):
self.best_index_ = self.refit(results)
if not isinstance(self.best_index_, numbers.Integral):
raise TypeError('best_index_ returned is not an integer')
if (self.best_index_ < 0 or
self.best_index_ >= len(results["params"])):
raise IndexError('best_index_ index out of range')
else:
self.best_index_ = results["rank_test_%s"
% refit_metric].argmin()
self.best_score_ = results["mean_test_%s" % refit_metric][
self.best_index_]
self.best_params_ = results["params"][self.best_index_]
if self.refit:
# we clone again after setting params in case some
# of the params are estimators as well.
self.best_estimator_ = clone(clone(base_estimator).set_params(
**self.best_params_))
refit_start_time = time.time()
if y is not None:
self.best_estimator_.fit(X, y, **fit_params, **feature_params)
else:
self.best_estimator_.fit(X, **fit_params, **feature_params)
refit_end_time = time.time()
self.refit_time_ = refit_end_time - refit_start_time
# Store the only scorer not as a dict for single metric evaluation
self.scorer_ = scorers if self.multimetric_ else scorers['score']
self.cv_results_ = results
self.n_splits_ = n_splits
return self
def _format_results(self, candidate_params, scorers, n_splits, out):
n_candidates = len(candidate_params)
        # if one chooses to see the train score, "out" will contain train score info
if self.return_train_score:
(train_score_dicts, test_score_dicts, test_sample_counts, fit_time,
score_time) = zip(*out)
else:
(test_score_dicts, test_sample_counts, fit_time,
score_time) = zip(*out)
        # test_score_dicts and train_score_dicts are lists of dictionaries;
        # we turn them into a dict of lists
test_scores = _aggregate_score_dicts(test_score_dicts)
if self.return_train_score:
train_scores = _aggregate_score_dicts(train_score_dicts)
results = {}
def _store(key_name, array, weights=None, splits=False, rank=False):
# When iterated first by splits, then by parameters
# We want `array` to have `n_candidates` rows and `n_splits` cols.
array = np.array(array, dtype=np.float64).reshape(n_candidates,
n_splits)
if splits:
for split_i in range(n_splits):
# Uses closure to alter the results
results["split%d_%s"
% (split_i, key_name)] = array[:, split_i]
array_means = np.average(array, axis=1, weights=weights)
results['mean_%s' % key_name] = array_means
# Weighted std is not directly available in numpy
array_stds = np.sqrt(np.average((array -
array_means[:, np.newaxis]) ** 2,
axis=1, weights=weights))
results['std_%s' % key_name] = array_stds
if rank:
results["rank_%s" % key_name] = np.asarray(
rankdata(-array_means, method='min'), dtype=np.int32)
_store('fit_time', fit_time)
_store('score_time', score_time)
# Use one MaskedArray and mask all the places where the param is not
# applicable for that candidate. Use defaultdict as each candidate may
# not contain all the params
param_results = defaultdict(partial(MaskedArray,
np.empty(n_candidates,),
mask=True,
dtype=object))
for cand_i, params in enumerate(candidate_params):
for name, value in params.items():
# An all masked empty array gets created for the key
# `"param_%s" % name` at the first occurrence of `name`.
# Setting the value at an index also unmasks that index
param_results["param_%s" % name][cand_i] = value
results.update(param_results)
# Store a list of param dicts at the key 'params'
results['params'] = candidate_params
# NOTE test_sample counts (weights) remain the same for all candidates
        test_sample_counts = np.array(test_sample_counts[:n_splits],
                                      dtype=int)
if self.iid != 'deprecated':
warnings.warn(
"The parameter 'iid' is deprecated in 0.22 and will be "
"removed in 0.24.", FutureWarning
)
iid = self.iid
else:
iid = False
for scorer_name in scorers.keys():
# Computed the (weighted) mean and std for test scores alone
_store('test_%s' % scorer_name, test_scores[scorer_name],
splits=True, rank=True,
weights=test_sample_counts if iid else None)
if self.return_train_score:
_store('train_%s' % scorer_name, train_scores[scorer_name],
splits=True)
return results
class ExtendedGridSearchCV(ExtendedBaseSearchCV, GridSearchCV):
_required_parameters = ["estimator", "param_grid"]
def __init__(self, estimator, param_grid, scoring=None, n_jobs=None,
iid='deprecated', refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score=np.nan,
return_train_score=False, param_routing=None):
super().__init__(
estimator=estimator, scoring=scoring,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score,
return_train_score=return_train_score, param_routing=param_routing)
self.param_grid = param_grid
_check_param_grid(param_grid)
def _run_search(self, evaluate_candidates):
evaluate_candidates(ParameterGrid(self.param_grid))
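

# ---------------------------------------------------------------------------
# Illustrative usage sketch -- not part of the original module.  It assumes a
# scikit-learn install and uses a hypothetical toy dataset and estimator; with
# param_routing left at its default, the search should behave much like a
# plain GridSearchCV.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression

    X_demo, y_demo = load_iris(return_X_y=True)
    demo_search = ExtendedGridSearchCV(
        LogisticRegression(max_iter=1000),
        param_grid={"C": [0.1, 1.0, 10.0]},
        cv=3,
    )
    demo_search.fit(X_demo, y_demo)
    # best_params_, best_score_ and cv_results_ are populated as in GridSearchCV.
    print(demo_search.best_params_, demo_search.best_score_)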
| true
| true
|
1c464a27e4586e149240c4356a12128973601b60
| 6,714
|
py
|
Python
|
fw_neopixel_pride.py
|
tammymakesthings/fw_neopixel_pride
|
3d8df503f7161a23b11d9298c62d45b2e6c17d60
|
[
"MIT"
] | 2
|
2019-06-09T19:19:34.000Z
|
2021-06-02T20:40:21.000Z
|
fw_neopixel_pride.py
|
tammymakesthings/fw_neopixel_pride
|
3d8df503f7161a23b11d9298c62d45b2e6c17d60
|
[
"MIT"
] | null | null | null |
fw_neopixel_pride.py
|
tammymakesthings/fw_neopixel_pride
|
3d8df503f7161a23b11d9298c62d45b2e6c17d60
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Pride Flag NeoPixel Badge
Displays a bunch of different Pride flags on a NeoPixel grid. Designed for use
with the Adafruit Feather M0 Express and NeoPixel FeatherWing.
Full details at <http://github.com/tammymakesthings/cpy_neopixel_pride>
@author: tammy.cravit
@license: MIT
"""
import sys
from time import sleep
# Delay in seconds between frames of the animation.
ANIMATION_SPEED = 0.3
# Time in seconds to hold each flag on screen before switching.
SHOW_PATTERN_DELAY = 15
# Intensity (0-1) of the NeoPixels. Higher intensity is brighter but draws
# more current.
PATTERN_INTENSITY = 0.3
# The number of rows in the NeoPixel grid.
NUM_ROWS = 4
# The number of columns in the NeoPixel grid.
NUM_COLS = 8
# Board pin to which the NeoPixel is connected
neopixel_pin = None
# The NeoPixel object controlling the pixels.
pixels = None
# Do the hardware setup if we're running on CircuitPython.
if sys.implementation.name == "circuitpython":
import time
import board
import neopixel
# Control pin defaults to #6
neopixel_pin = board.D6
pixels = neopixel.NeoPixel(neopixel_pin, (NUM_ROWS * NUM_COLS),
brightness=PATTERN_INTENSITY, auto_write=False)
############################################################################
# Define all of the flag color palettes
############################################################################
flag_colors = {
"-": (0, 0, 0), # Black
# LGBT Flag
'A': (231, 0, 0), # Electric Red
'B': (224, 89, 17), # Dark Orange
'C': (255, 239, 0), # Canary Yellow
'D': (0, 129, 31), # La Salle Green
'E': (0, 68, 255), # Blue (RYB)
'F': (118, 0, 137), # Patriarch
# Trans Flag
'G': (65, 175, 222), # Maya Blue
'H': (255, 255, 255), # White
'I': (217, 148, 144), # Amaranth Pink
# Bi Pride Flag
'J': (215, 2, 112), # Magenta
'K': (115, 79, 150), # Deep Lavender
'L': (0, 56, 168), # Royal
# Nonbinary Flag
'M': (255, 239, 0), # Yellow
'N': (230, 230, 230), # White
'O': (255, 20, 140), # Lavender
# Pansexual Flag
'P': (255, 20, 140), # Deep Pink
'Q': (255, 218, 0), # Sizzling Sunrise
'R': (5, 174, 255) # Blue Bolt
}
############################################################################
# Define the actual flag patterns. Each pattern must reference colors defined
# in the associated color map. The pattern contains one letter per column of
# the display.
############################################################################
patterns = {
'pride_flag': {'pattern': '-ABCDEF-', 'colors': flag_colors},
    'trans_flag': {'pattern': '-GIHIG--', 'colors': flag_colors},
'bi_flag' : {'pattern': '--JJKLL-', 'colors': flag_colors},
'nb_flag' : {'pattern': 'MMNNOO--', 'colors': flag_colors},
'pan_flag' : {'pattern': '-PPQQRR-', 'colors': flag_colors},
}
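
# Illustrative note (not part of the original sketch): adding another flag only
# requires an 8-character pattern string, one letter per display column, using
# colors defined in flag_colors.  A hypothetical red/white/blue test entry
# could look like:
#
#     patterns['demo_flag'] = {'pattern': '-AAHHEE-', 'colors': flag_colors}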
############################################################################
# Helper functions
############################################################################
def clear_pixels(rows=NUM_ROWS, cols=NUM_COLS):
"""
.. function:: clear_pixels([rows, cols])
Clear the entire pixel array.
    Sets all of the pixels in the NeoPixel array to black and then writes
the values to the array. Has no effect if not running on a CircuitPython
device.
:param rows: number of rows in the array (defaults to value of NUM_ROWS)
:param cols: number of cols in the array (defaults to value of NUM_COLS)
:rtype: None
"""
print("inside clearPixels({0}, {1})".format(rows, cols))
if pixels is not None:
        pixels.fill((0, 0, 0))
pixels.show()
def set_column(display_column, rgb_value):
"""
.. function:: set_column(display_column, rgb_value)
Set all pixels in one column of the display to the given color.
:param display_column: The column on the display to set
:param rgb_value: The RGB color to set the pixels to
:type rgb_value: 3-tuple (R, G, B)
:rtype: None
"""
print('Called set_column({0}, {1})'.format(display_column, rgb_value))
if pixels is not None:
for i in range(0, NUM_ROWS):
which_pixel = (i * NUM_COLS) + display_column
pixels[which_pixel] = rgb_value
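
# For example (illustrative): with NUM_ROWS = 4 and NUM_COLS = 8, a call like
# set_column(2, rgb) writes pixels 2, 10, 18 and 26 -- the third pixel of each
# row in the row-major FeatherWing layout.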
def slide_in_animation(the_pattern, color_map, animation_speed=ANIMATION_SPEED):
"""
.. function:: slide_in_animation(the_pattern, color_map, animation_speed)
Render the animation for a single flag.
:param the_pattern: The flag pattern, rendered as a string. Each character
in the string should match a color in the color map.
:param color_map: The color map. The keys of the dictionary should be a
character from the pattern. The value of the dictionary entries should be
3-tuples with the R, G, B values for the specified color.
:type color_map: dict
:param animation_speed: The time (in seconds) to sleep between frames of
the animation.
:rtype: None
"""
print("inside slideInAnimation({0}, {1}, {2})".format(the_pattern, color_map, animation_speed))
for i in range(0, len(the_pattern)):
starting_column = len(the_pattern) - i - 1
ending_column = len(the_pattern)
which_letter = 0
print("Animation: Repetition {0}, starting column={1}".format(i+1, starting_column))
for j in range(0, starting_column):
set_column(j, (0,0,0))
print("-", sep='', end='')
for j in range(starting_column, ending_column):
print(the_pattern[which_letter], sep='', end='')
set_column(j, color_map[the_pattern[which_letter]])
which_letter += 1
print('\n')
if sys.implementation.name == "circuitpython":
pixels.show()
sleep(animation_speed)
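
# Quick desktop dry run (illustrative, not part of the original badge code):
# off the CircuitPython board `pixels` is None, so a call such as
#
#     slide_in_animation('-ABCDEF-', flag_colors, animation_speed=0)
#
# only prints the animation frames instead of driving the LEDs.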
def renderAllPatterns(the_patterns):
for pattern_name, pattern_data in the_patterns.items():
print("renderAllPatterns(): rendering flag: {0}".format(pattern_name))
the_pattern = pattern_data['pattern']
color_map = pattern_data['colors']
slide_in_animation(the_pattern, color_map)
sleep(SHOW_PATTERN_DELAY)
############################################################################
# Main execution loop
############################################################################
if __name__=="__main__":
while True:
renderAllPatterns(patterns)
| 34.608247
| 99
| 0.566279
|
import sys
from time import sleep
ANIMATION_SPEED = 0.3
SHOW_PATTERN_DELAY = 15
PATTERN_INTENSITY = 0.3
NUM_ROWS = 4
NUM_COLS = 8
neopixel_pin = None
pixels = None
if sys.implementation.name == "circuitpython":
import time
import board
import neopixel
# Control pin defaults to #6
neopixel_pin = board.D6
pixels = neopixel.NeoPixel(neopixel_pin, (NUM_ROWS * NUM_COLS),
brightness=PATTERN_INTENSITY, auto_write=False)
############################################################################
# Define all of the flag color palettes
############################################################################
flag_colors = {
"-": (0, 0, 0), # Black
# LGBT Flag
'A': (231, 0, 0), # Electric Red
'B': (224, 89, 17), # Dark Orange
'C': (255, 239, 0), # Canary Yellow
'D': (0, 129, 31), # La Salle Green
'E': (0, 68, 255), # Blue (RYB)
'F': (118, 0, 137), # Patriarch
# Trans Flag
'G': (65, 175, 222), # Maya Blue
'H': (255, 255, 255), # White
'I': (217, 148, 144), # Amaranth Pink
# Bi Pride Flag
'J': (215, 2, 112), # Magenta
'K': (115, 79, 150), # Deep Lavender
'L': (0, 56, 168), # Royal
# Nonbinary Flag
'M': (255, 239, 0), # Yellow
'N': (230, 230, 230), # White
'O': (255, 20, 140), # Lavender
# Pansexual Flag
'P': (255, 20, 140), # Deep Pink
'Q': (255, 218, 0), # Sizzling Sunrise
'R': (5, 174, 255) # Blue Bolt
}
############################################################################
# Define the actual flag patterns. Each pattern must reference colors defined
# in the associated color map. The pattern contains one letter per column of
# the display.
############################################################################
patterns = {
'pride_flag': {'pattern': '-ABCDEF-', 'colors': flag_colors},
    'trans_flag': {'pattern': '-GIHIG--', 'colors': flag_colors},
'bi_flag' : {'pattern': '--JJKLL-', 'colors': flag_colors},
'nb_flag' : {'pattern': 'MMNNOO--', 'colors': flag_colors},
'pan_flag' : {'pattern': '-PPQQRR-', 'colors': flag_colors},
}
############################################################################
# Helper functions
############################################################################
def clear_pixels(rows=NUM_ROWS, cols=NUM_COLS):
print("inside clearPixels({0}, {1})".format(rows, cols))
if pixels is not None:
        pixels.fill((0, 0, 0))
pixels.show()
def set_column(display_column, rgb_value):
print('Called set_column({0}, {1})'.format(display_column, rgb_value))
if pixels is not None:
for i in range(0, NUM_ROWS):
which_pixel = (i * NUM_COLS) + display_column
pixels[which_pixel] = rgb_value
def slide_in_animation(the_pattern, color_map, animation_speed=ANIMATION_SPEED):
print("inside slideInAnimation({0}, {1}, {2})".format(the_pattern, color_map, animation_speed))
for i in range(0, len(the_pattern)):
starting_column = len(the_pattern) - i - 1
ending_column = len(the_pattern)
which_letter = 0
print("Animation: Repetition {0}, starting column={1}".format(i+1, starting_column))
for j in range(0, starting_column):
set_column(j, (0,0,0))
print("-", sep='', end='')
for j in range(starting_column, ending_column):
print(the_pattern[which_letter], sep='', end='')
set_column(j, color_map[the_pattern[which_letter]])
which_letter += 1
print('\n')
if sys.implementation.name == "circuitpython":
pixels.show()
sleep(animation_speed)
def renderAllPatterns(the_patterns):
for pattern_name, pattern_data in the_patterns.items():
print("renderAllPatterns(): rendering flag: {0}".format(pattern_name))
the_pattern = pattern_data['pattern']
color_map = pattern_data['colors']
slide_in_animation(the_pattern, color_map)
sleep(SHOW_PATTERN_DELAY)
############################################################################
# Main execution loop
############################################################################
if __name__=="__main__":
while True:
renderAllPatterns(patterns)
| true
| true
|
1c464c7f4975739b483955b49e931f3e73459cb0
| 966
|
py
|
Python
|
pietoolbelt/augmentations/segmentation.py
|
kitkat52/pietoolbelt
|
0e0b5859662fcb43b008218746cc3e76cc66b6b8
|
[
"MIT"
] | 1
|
2021-05-30T08:21:12.000Z
|
2021-05-30T08:21:12.000Z
|
pietoolbelt/augmentations/segmentation.py
|
kitkat52/pietoolbelt
|
0e0b5859662fcb43b008218746cc3e76cc66b6b8
|
[
"MIT"
] | 7
|
2020-07-07T21:04:08.000Z
|
2021-12-13T10:08:17.000Z
|
pietoolbelt/augmentations/segmentation.py
|
kitkat52/pietoolbelt
|
0e0b5859662fcb43b008218746cc3e76cc66b6b8
|
[
"MIT"
] | 1
|
2021-06-17T09:21:39.000Z
|
2021-06-17T09:21:39.000Z
|
import numpy as np
import torch
from .common import BaseAugmentations
__all__ = ['SegmentationAugmentations']
class SegmentationAugmentations(BaseAugmentations):
def __init__(self, is_train: bool, to_pytorch: bool, preprocess: callable):
super().__init__(is_train, to_pytorch, preprocess)
def augmentation(self, data: dict) -> dict:
augmented = self._aug(image=data['data'], mask=data['target'] / (data['target'].max() + 1e-7))
img, mask = augmented['image'], augmented['mask']
if self._need_to_pytorch:
img, mask = self.img_to_pytorch(img), self.mask_to_pytorch(mask)
return {'data': img, 'target': mask}
@staticmethod
def img_to_pytorch(image):
return torch.from_numpy(np.expand_dims(np.moveaxis(image, -1, 0).astype(np.float32) / 128 - 1, axis=0))
@staticmethod
def mask_to_pytorch(mask):
return torch.from_numpy(np.expand_dims(mask.astype(np.float32), axis=0))
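

# ---------------------------------------------------------------------------
# Shape sketch -- illustrative only, not part of the original module.  It shows
# the tensor layouts produced by the two static helpers on a hypothetical
# 32x32 RGB image: img_to_pytorch yields a 1xCxHxW float tensor scaled to
# roughly [-1, 1), and mask_to_pytorch yields a 1xHxW float tensor.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo_img = np.zeros((32, 32, 3), dtype=np.uint8)
    demo_mask = np.ones((32, 32), dtype=np.uint8)
    print(SegmentationAugmentations.img_to_pytorch(demo_img).shape)    # torch.Size([1, 3, 32, 32])
    print(SegmentationAugmentations.mask_to_pytorch(demo_mask).shape)  # torch.Size([1, 32, 32])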
| 33.310345
| 111
| 0.68323
|
import numpy as np
import torch
from .common import BaseAugmentations
__all__ = ['SegmentationAugmentations']
class SegmentationAugmentations(BaseAugmentations):
def __init__(self, is_train: bool, to_pytorch: bool, preprocess: callable):
super().__init__(is_train, to_pytorch, preprocess)
def augmentation(self, data: dict) -> dict:
augmented = self._aug(image=data['data'], mask=data['target'] / (data['target'].max() + 1e-7))
img, mask = augmented['image'], augmented['mask']
if self._need_to_pytorch:
img, mask = self.img_to_pytorch(img), self.mask_to_pytorch(mask)
return {'data': img, 'target': mask}
@staticmethod
def img_to_pytorch(image):
return torch.from_numpy(np.expand_dims(np.moveaxis(image, -1, 0).astype(np.float32) / 128 - 1, axis=0))
@staticmethod
def mask_to_pytorch(mask):
return torch.from_numpy(np.expand_dims(mask.astype(np.float32), axis=0))
| true
| true
|
1c464c918d295b7c3a348cdb1a566cf0a3e06af7
| 5,165
|
py
|
Python
|
starfish/core/experiment/builder/test/factories/all_purpose.py
|
kne42/starfish
|
78b348c9756f367221dcca725cfa5107e5520b33
|
[
"MIT"
] | null | null | null |
starfish/core/experiment/builder/test/factories/all_purpose.py
|
kne42/starfish
|
78b348c9756f367221dcca725cfa5107e5520b33
|
[
"MIT"
] | null | null | null |
starfish/core/experiment/builder/test/factories/all_purpose.py
|
kne42/starfish
|
78b348c9756f367221dcca725cfa5107e5520b33
|
[
"MIT"
] | null | null | null |
from abc import ABCMeta
from typing import Callable, cast, Collection, Mapping, Sequence, Type, Union
import numpy as np
import slicedimage
from starfish.core.experiment.builder import (
build_irregular_image,
FetchedTile,
tile_fetcher_factory,
TileFetcher,
TileIdentifier,
)
from starfish.core.types import Axes, Coordinates, CoordinateValue
class LocationAwareFetchedTile(FetchedTile, metaclass=ABCMeta):
"""This is the base class for tiles that are aware of their location in the 5D tensor.
"""
def __init__(
self,
# these are the arguments passed in as a result of tile_fetcher_factory's
# pass_tile_indices parameter.
fov_id: int, round_label: int, ch_label: int, zplane_label: int,
# these are the arguments we are passing through tile_fetcher_factory.
fovs: Sequence[int], rounds: Sequence[int], chs: Sequence[int], zplanes: Sequence[int],
tile_height: int, tile_width: int,
) -> None:
super().__init__()
self.fov_id = fov_id
self.round_label = round_label
self.ch_label = ch_label
self.zplane_label = zplane_label
self.fovs = fovs
self.rounds = rounds
self.chs = chs
self.zplanes = zplanes
self.tile_height = tile_height
self.tile_width = tile_width
def _apply_coords_range_fetcher(
backing_tile_fetcher: TileFetcher,
tile_coordinates_callback: Callable[
[TileIdentifier], Mapping[Coordinates, CoordinateValue]],
) -> TileFetcher:
"""Given a :py:class:`TileFetcher`, intercept all the returned :py:class:`FetchedTile` instances
and replace the coordinates using the coordinates from `tile_coordinates_callback`."""
class ModifiedTile(FetchedTile):
def __init__(
self,
backing_tile: FetchedTile,
tile_identifier: TileIdentifier,
*args, **kwargs
):
super().__init__(*args, **kwargs)
self.backing_tile = backing_tile
self.tile_identifier = tile_identifier
@property
def shape(self) -> Mapping[Axes, int]:
return self.backing_tile.shape
@property
def coordinates(self) -> Mapping[Union[str, Coordinates], CoordinateValue]:
return cast(
Mapping[Union[str, Coordinates], CoordinateValue],
tile_coordinates_callback(self.tile_identifier))
def tile_data(self) -> np.ndarray:
return self.backing_tile.tile_data()
class ModifiedTileFetcher(TileFetcher):
def get_tile(
self, fov_id: int, round_label: int, ch_label: int, zplane_label: int,
) -> FetchedTile:
original_fetched_tile = backing_tile_fetcher.get_tile(
fov_id, round_label, ch_label, zplane_label)
tile_identifier = TileIdentifier(fov_id, round_label, ch_label, zplane_label)
return ModifiedTile(original_fetched_tile, tile_identifier)
return ModifiedTileFetcher()
def collection_factory(
fetched_tile_cls: Type[LocationAwareFetchedTile],
tile_identifiers: Collection[TileIdentifier],
tile_coordinates_callback: Callable[
[TileIdentifier], Mapping[Coordinates, CoordinateValue]],
tile_height: int,
tile_width: int,
) -> slicedimage.Collection:
"""Given a type that implements the :py:class:`LocationAwareFetchedTile` contract, produce a
slicedimage Collection with the tiles in `tile_identifiers`. For a given tile_identifier,
retrieve the coordinates by invoking the callback `tile_coordinates_callback`.
Parameters
----------
fetched_tile_cls : Type[LocationAwareFetchedTile]
The class of the FetchedTile.
tile_identifiers : Collection[TileIdentifier]
TileIdentifiers for each of the tiles in the collection.
    tile_coordinates_callback : Callable[[TileIdentifier], Mapping[Coordinates, CoordinateValue]]
A callable that returns the coordinates for a given tile's TileIdentifier.
tile_height : int
Height of each tile, in pixels.
tile_width : int
Width of each tile, in pixels.
"""
all_fov_ids = sorted(set(
tile_identifier.fov_id for tile_identifier in tile_identifiers))
all_round_labels = sorted(set(
tile_identifier.round_label for tile_identifier in tile_identifiers))
all_ch_labels = sorted(set(
tile_identifier.ch_label for tile_identifier in tile_identifiers))
all_zplane_labels = sorted(set(
tile_identifier.zplane_label for tile_identifier in tile_identifiers))
original_tile_fetcher = tile_fetcher_factory(
fetched_tile_cls, True,
all_fov_ids, all_round_labels, all_ch_labels, all_zplane_labels,
tile_height, tile_width,
)
modified_tile_fetcher = _apply_coords_range_fetcher(
original_tile_fetcher, tile_coordinates_callback)
return build_irregular_image(
tile_identifiers,
modified_tile_fetcher,
default_shape={Axes.Y: tile_height, Axes.X: tile_width}
)
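

# ---------------------------------------------------------------------------
# Illustrative sketch -- not part of the original factory module.  It shows the
# shape of a hypothetical tile_coordinates_callback for collection_factory:
# every tile gets the same X/Y extent and a Z coordinate derived from its
# zplane label.  Real tests would compute these from the experiment geometry.
# ---------------------------------------------------------------------------
def _example_tile_coordinates(
        tile_identifier: TileIdentifier) -> Mapping[Coordinates, CoordinateValue]:
    return {
        Coordinates.X: (0.0, 0.001),
        Coordinates.Y: (0.0, 0.001),
        Coordinates.Z: float(tile_identifier.zplane_label),
    }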
| 39.128788
| 100
| 0.684802
|
from abc import ABCMeta
from typing import Callable, cast, Collection, Mapping, Sequence, Type, Union
import numpy as np
import slicedimage
from starfish.core.experiment.builder import (
build_irregular_image,
FetchedTile,
tile_fetcher_factory,
TileFetcher,
TileIdentifier,
)
from starfish.core.types import Axes, Coordinates, CoordinateValue
class LocationAwareFetchedTile(FetchedTile, metaclass=ABCMeta):
def __init__(
self,
# pass_tile_indices parameter.
fov_id: int, round_label: int, ch_label: int, zplane_label: int,
# these are the arguments we are passing through tile_fetcher_factory.
fovs: Sequence[int], rounds: Sequence[int], chs: Sequence[int], zplanes: Sequence[int],
tile_height: int, tile_width: int,
) -> None:
super().__init__()
self.fov_id = fov_id
self.round_label = round_label
self.ch_label = ch_label
self.zplane_label = zplane_label
self.fovs = fovs
self.rounds = rounds
self.chs = chs
self.zplanes = zplanes
self.tile_height = tile_height
self.tile_width = tile_width
def _apply_coords_range_fetcher(
backing_tile_fetcher: TileFetcher,
tile_coordinates_callback: Callable[
[TileIdentifier], Mapping[Coordinates, CoordinateValue]],
) -> TileFetcher:
class ModifiedTile(FetchedTile):
def __init__(
self,
backing_tile: FetchedTile,
tile_identifier: TileIdentifier,
*args, **kwargs
):
super().__init__(*args, **kwargs)
self.backing_tile = backing_tile
self.tile_identifier = tile_identifier
@property
def shape(self) -> Mapping[Axes, int]:
return self.backing_tile.shape
@property
def coordinates(self) -> Mapping[Union[str, Coordinates], CoordinateValue]:
return cast(
Mapping[Union[str, Coordinates], CoordinateValue],
tile_coordinates_callback(self.tile_identifier))
def tile_data(self) -> np.ndarray:
return self.backing_tile.tile_data()
class ModifiedTileFetcher(TileFetcher):
def get_tile(
self, fov_id: int, round_label: int, ch_label: int, zplane_label: int,
) -> FetchedTile:
original_fetched_tile = backing_tile_fetcher.get_tile(
fov_id, round_label, ch_label, zplane_label)
tile_identifier = TileIdentifier(fov_id, round_label, ch_label, zplane_label)
return ModifiedTile(original_fetched_tile, tile_identifier)
return ModifiedTileFetcher()
def collection_factory(
fetched_tile_cls: Type[LocationAwareFetchedTile],
tile_identifiers: Collection[TileIdentifier],
tile_coordinates_callback: Callable[
[TileIdentifier], Mapping[Coordinates, CoordinateValue]],
tile_height: int,
tile_width: int,
) -> slicedimage.Collection:
all_fov_ids = sorted(set(
tile_identifier.fov_id for tile_identifier in tile_identifiers))
all_round_labels = sorted(set(
tile_identifier.round_label for tile_identifier in tile_identifiers))
all_ch_labels = sorted(set(
tile_identifier.ch_label for tile_identifier in tile_identifiers))
all_zplane_labels = sorted(set(
tile_identifier.zplane_label for tile_identifier in tile_identifiers))
original_tile_fetcher = tile_fetcher_factory(
fetched_tile_cls, True,
all_fov_ids, all_round_labels, all_ch_labels, all_zplane_labels,
tile_height, tile_width,
)
modified_tile_fetcher = _apply_coords_range_fetcher(
original_tile_fetcher, tile_coordinates_callback)
return build_irregular_image(
tile_identifiers,
modified_tile_fetcher,
default_shape={Axes.Y: tile_height, Axes.X: tile_width}
)
| true
| true
|
1c464cd9a94ce016f5a29a3b4da763617bf225a8
| 75,458
|
py
|
Python
|
simulator/config_pb2.py
|
googleinterns/cluster-resource-forecast
|
48b67346160e4f9c010552b9b20b8bace1a321ad
|
[
"Apache-2.0"
] | 25
|
2020-05-06T21:29:04.000Z
|
2022-02-17T05:25:25.000Z
|
simulator/config_pb2.py
|
touchuyht/cluster-resource-forecast
|
48b67346160e4f9c010552b9b20b8bace1a321ad
|
[
"Apache-2.0"
] | 3
|
2020-06-09T04:14:08.000Z
|
2021-04-25T07:30:38.000Z
|
simulator/config_pb2.py
|
touchuyht/cluster-resource-forecast
|
48b67346160e4f9c010552b9b20b8bace1a321ad
|
[
"Apache-2.0"
] | 12
|
2020-06-05T00:52:01.000Z
|
2021-12-17T06:55:30.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: simulator/config.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="simulator/config.proto",
package="",
syntax="proto2",
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x16simulator/config.proto"6\n\nInt64Range\x12\x13\n\x0blower_bound\x18\x01 \x01(\x03\x12\x13\n\x0bupper_bound\x18\x02 \x01(\x03".\n\x0c\x44\x61taLocation\x12\x0f\n\x07\x64\x61taset\x18\x01 \x01(\t\x12\r\n\x05table\x18\x02 \x01(\t"\xb8\x01\n\x08VMFilter\x12\x12\n\nstart_time\x18\x01 \x01(\x03\x12\x10\n\x08\x65nd_time\x18\x02 \x01(\x03\x12 \n\x18remove_non_top_level_vms\x18\x03 \x01(\x08\x12#\n\x0epriority_range\x18\x04 \x01(\x0b\x32\x0b.Int64Range\x12+\n\x16scheduling_class_range\x18\x05 \x01(\x0b\x32\x0b.Int64Range\x12\x12\n\nmachine_id\x18\x06 \x03(\x03"_\n\x0bLoadOrWrite\x12\x1e\n\x05input\x18\x01 \x01(\x0b\x32\r.DataLocationH\x00\x12\x1f\n\x06output\x18\x02 \x01(\x0b\x32\r.DataLocationH\x00\x42\x0f\n\rload_or_write"\xac\x01\n\x16\x41\x62stractMetricSelector\x12\x1a\n\x10max_memory_usage\x18\x01 \x01(\x08H\x00\x12\x1e\n\x14\x63pu_usage_percentile\x18\x02 \x01(\x03H\x00\x12\x17\n\ravg_cpu_usage\x18\x03 \x01(\x08H\x00\x12\x1a\n\x10\x61vg_memory_usage\x18\x04 \x01(\x08H\x00\x12\x17\n\rmax_cpu_usage\x18\x05 \x01(\x08H\x00\x42\x08\n\x06metric"\\\n\rResetAndShift\x12\x1a\n\x12reset_time_to_zero\x18\x01 \x01(\x08\x12!\n\x0crandom_shift\x18\x02 \x01(\x0b\x32\x0b.Int64Range\x12\x0c\n\x04seed\x18\x03 \x01(\x03"\xa6\x01\n\tScheduler\x12(\n\tat_random\x18\x01 \x01(\x0b\x32\x13.Scheduler.AtRandomH\x00\x12\x17\n\rby_machine_id\x18\x02 \x01(\x08H\x00\x12\x19\n\x0f\x62y_vm_unique_id\x18\x03 \x01(\x08H\x00\x1a.\n\x08\x41tRandom\x12\x14\n\x0cnum_machines\x18\x01 \x01(\x03\x12\x0c\n\x04seed\x18\x02 \x01(\x03\x42\x0b\n\tscheduler"\xbc\t\n\x0fPredictorConfig\x12.\n\x14\x64\x65\x63orated_predictors\x18\n \x03(\x0b\x32\x10.PredictorConfig\x12<\n\ravg_predictor\x18\x01 \x01(\x0b\x32#.PredictorConfig.AvgPredictorConfigH\x00\x12<\n\rmax_predictor\x18\x02 \x01(\x0b\x32#.PredictorConfig.MaxPredictorConfigH\x00\x12<\n\ravg_decorator\x18\x03 \x01(\x0b\x32#.PredictorConfig.AvgDecoratorConfigH\x00\x12<\n\rmax_decorator\x18\x04 \x01(\x0b\x32#.PredictorConfig.MaxDecoratorConfigH\x00\x12M\n\x1bper_vm_percentile_predictor\x18\x05 \x01(\x0b\x32&.PredictorConfig.PerVMPercentileConfigH\x00\x12:\n\x11n_sigma_predictor\x18\x06 \x01(\x0b\x32\x1d.PredictorConfig.NSigmaConfigH\x00\x12@\n\x0flimit_predictor\x18\x07 \x01(\x0b\x32%.PredictorConfig.LimitPredictorConfigH\x00\x12W\n per_machine_percentile_predictor\x18\x08 \x01(\x0b\x32+.PredictorConfig.PerMachinePercentileConfigH\x00\x1a\x43\n\x12\x41vgPredictorConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x1a/\n\x14LimitPredictorConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x1a\x43\n\x12MaxPredictorConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x1a|\n\x15PerVMPercentileConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x12\x17\n\npercentile\x18\x03 \x01(\x01:\x03\x31\x30\x30\x12\x1b\n\x13num_history_samples\x18\x04 \x01(\x03\x1a\x81\x01\n\x1aPerMachinePercentileConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x12\x17\n\npercentile\x18\x03 \x01(\x01:\x03\x31\x30\x30\x12\x1b\n\x13num_history_samples\x18\x04 \x01(\x03\x1a\x65\n\x0cNSigmaConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x12\x1b\n\x13num_history_samples\x18\x03 \x01(\x03\x12\t\n\x01n\x18\x04 
\x01(\x03\x1a\x14\n\x12\x41vgDecoratorConfig\x1a\x14\n\x12MaxDecoratorConfigB\x0b\n\tpredictor"\xfa\x01\n\x13\x46ortuneTellerConfig\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0csave_samples\x18\x02 \x01(\x08\x12\x33\n\x06oracle\x18\x03 \x01(\x0b\x32!.FortuneTellerConfig.OracleConfigH\x00\x12%\n\tpredictor\x18\x04 \x01(\x0b\x32\x10.PredictorConfigH\x00\x1aY\n\x0cOracleConfig\x12\x1a\n\x12horizon_in_seconds\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x12\x17\n\npercentile\x18\x03 \x01(\x03:\x03\x31\x30\x30\x42\x08\n\x06teller"\xfa\x03\n\x10SimulationConfig\x12\x1c\n\x05input\x18\x01 \x01(\x0b\x32\r.DataLocation\x12\x19\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\t.VMFilter\x12&\n\x10\x66iltered_samples\x18\x03 \x01(\x0b\x32\x0c.LoadOrWrite\x12*\n\x14time_aligned_samples\x18\x04 \x01(\x0b\x32\x0c.LoadOrWrite\x12\'\n\x06metric\x18\x05 \x01(\x0b\x32\x17.AbstractMetricSelector\x12\x33\n\x1dsamples_with_abstract_metrics\x18\x06 \x01(\x0b\x32\x0c.LoadOrWrite\x12\'\n\x0freset_and_shift\x18\x07 \x01(\x0b\x32\x0e.ResetAndShift\x12\x32\n\x1csamples_with_reset_and_shift\x18\x08 \x01(\x0b\x32\x0c.LoadOrWrite\x12\x1d\n\tscheduler\x18\t \x01(\x0b\x32\n.Scheduler\x12\'\n\x11scheduled_samples\x18\n \x01(\x0b\x32\x0c.LoadOrWrite\x12,\n\x0e\x66ortune_teller\x18\x0b \x03(\x0b\x32\x14.FortuneTellerConfig\x12(\n\x11simulation_result\x18\x0c \x01(\x0b\x32\r.DataLocation',
)
_INT64RANGE = _descriptor.Descriptor(
name="Int64Range",
full_name="Int64Range",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="lower_bound",
full_name="Int64Range.lower_bound",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="upper_bound",
full_name="Int64Range.upper_bound",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=26,
serialized_end=80,
)
_DATALOCATION = _descriptor.Descriptor(
name="DataLocation",
full_name="DataLocation",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="dataset",
full_name="DataLocation.dataset",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="table",
full_name="DataLocation.table",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=82,
serialized_end=128,
)
_VMFILTER = _descriptor.Descriptor(
name="VMFilter",
full_name="VMFilter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="start_time",
full_name="VMFilter.start_time",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="VMFilter.end_time",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="remove_non_top_level_vms",
full_name="VMFilter.remove_non_top_level_vms",
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="priority_range",
full_name="VMFilter.priority_range",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="scheduling_class_range",
full_name="VMFilter.scheduling_class_range",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="machine_id",
full_name="VMFilter.machine_id",
index=5,
number=6,
type=3,
cpp_type=2,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=131,
serialized_end=315,
)
_LOADORWRITE = _descriptor.Descriptor(
name="LoadOrWrite",
full_name="LoadOrWrite",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="input",
full_name="LoadOrWrite.input",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="output",
full_name="LoadOrWrite.output",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="load_or_write",
full_name="LoadOrWrite.load_or_write",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=317,
serialized_end=412,
)
_ABSTRACTMETRICSELECTOR = _descriptor.Descriptor(
name="AbstractMetricSelector",
full_name="AbstractMetricSelector",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="max_memory_usage",
full_name="AbstractMetricSelector.max_memory_usage",
index=0,
number=1,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="cpu_usage_percentile",
full_name="AbstractMetricSelector.cpu_usage_percentile",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="avg_cpu_usage",
full_name="AbstractMetricSelector.avg_cpu_usage",
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="avg_memory_usage",
full_name="AbstractMetricSelector.avg_memory_usage",
index=3,
number=4,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="max_cpu_usage",
full_name="AbstractMetricSelector.max_cpu_usage",
index=4,
number=5,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="metric",
full_name="AbstractMetricSelector.metric",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=415,
serialized_end=587,
)
_RESETANDSHIFT = _descriptor.Descriptor(
name="ResetAndShift",
full_name="ResetAndShift",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="reset_time_to_zero",
full_name="ResetAndShift.reset_time_to_zero",
index=0,
number=1,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="random_shift",
full_name="ResetAndShift.random_shift",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="seed",
full_name="ResetAndShift.seed",
index=2,
number=3,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=589,
serialized_end=681,
)
_SCHEDULER_ATRANDOM = _descriptor.Descriptor(
name="AtRandom",
full_name="Scheduler.AtRandom",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="num_machines",
full_name="Scheduler.AtRandom.num_machines",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="seed",
full_name="Scheduler.AtRandom.seed",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=791,
serialized_end=837,
)
_SCHEDULER = _descriptor.Descriptor(
name="Scheduler",
full_name="Scheduler",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="at_random",
full_name="Scheduler.at_random",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="by_machine_id",
full_name="Scheduler.by_machine_id",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="by_vm_unique_id",
full_name="Scheduler.by_vm_unique_id",
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[_SCHEDULER_ATRANDOM,],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="scheduler",
full_name="Scheduler.scheduler",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=684,
serialized_end=850,
)
_PREDICTORCONFIG_AVGPREDICTORCONFIG = _descriptor.Descriptor(
name="AvgPredictorConfig",
full_name="PredictorConfig.AvgPredictorConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="min_num_samples",
full_name="PredictorConfig.AvgPredictorConfig.min_num_samples",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="cap_to_limit",
full_name="PredictorConfig.AvgPredictorConfig.cap_to_limit",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=1462,
serialized_end=1529,
)
_PREDICTORCONFIG_LIMITPREDICTORCONFIG = _descriptor.Descriptor(
name="LimitPredictorConfig",
full_name="PredictorConfig.LimitPredictorConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="min_num_samples",
full_name="PredictorConfig.LimitPredictorConfig.min_num_samples",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=1531,
serialized_end=1578,
)
_PREDICTORCONFIG_MAXPREDICTORCONFIG = _descriptor.Descriptor(
name="MaxPredictorConfig",
full_name="PredictorConfig.MaxPredictorConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="min_num_samples",
full_name="PredictorConfig.MaxPredictorConfig.min_num_samples",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="cap_to_limit",
full_name="PredictorConfig.MaxPredictorConfig.cap_to_limit",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=1580,
serialized_end=1647,
)
_PREDICTORCONFIG_PERVMPERCENTILECONFIG = _descriptor.Descriptor(
name="PerVMPercentileConfig",
full_name="PredictorConfig.PerVMPercentileConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="min_num_samples",
full_name="PredictorConfig.PerVMPercentileConfig.min_num_samples",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="cap_to_limit",
full_name="PredictorConfig.PerVMPercentileConfig.cap_to_limit",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="percentile",
full_name="PredictorConfig.PerVMPercentileConfig.percentile",
index=2,
number=3,
type=1,
cpp_type=5,
label=1,
has_default_value=True,
default_value=float(100),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="num_history_samples",
full_name="PredictorConfig.PerVMPercentileConfig.num_history_samples",
index=3,
number=4,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=1649,
serialized_end=1773,
)
_PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG = _descriptor.Descriptor(
name="PerMachinePercentileConfig",
full_name="PredictorConfig.PerMachinePercentileConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="min_num_samples",
full_name="PredictorConfig.PerMachinePercentileConfig.min_num_samples",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="cap_to_limit",
full_name="PredictorConfig.PerMachinePercentileConfig.cap_to_limit",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="percentile",
full_name="PredictorConfig.PerMachinePercentileConfig.percentile",
index=2,
number=3,
type=1,
cpp_type=5,
label=1,
has_default_value=True,
default_value=float(100),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="num_history_samples",
full_name="PredictorConfig.PerMachinePercentileConfig.num_history_samples",
index=3,
number=4,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=1776,
serialized_end=1905,
)
_PREDICTORCONFIG_NSIGMACONFIG = _descriptor.Descriptor(
name="NSigmaConfig",
full_name="PredictorConfig.NSigmaConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="min_num_samples",
full_name="PredictorConfig.NSigmaConfig.min_num_samples",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="cap_to_limit",
full_name="PredictorConfig.NSigmaConfig.cap_to_limit",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="num_history_samples",
full_name="PredictorConfig.NSigmaConfig.num_history_samples",
index=2,
number=3,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="n",
full_name="PredictorConfig.NSigmaConfig.n",
index=3,
number=4,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=1907,
serialized_end=2008,
)
_PREDICTORCONFIG_AVGDECORATORCONFIG = _descriptor.Descriptor(
name="AvgDecoratorConfig",
full_name="PredictorConfig.AvgDecoratorConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=2010,
serialized_end=2030,
)
_PREDICTORCONFIG_MAXDECORATORCONFIG = _descriptor.Descriptor(
name="MaxDecoratorConfig",
full_name="PredictorConfig.MaxDecoratorConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=2032,
serialized_end=2052,
)
_PREDICTORCONFIG = _descriptor.Descriptor(
name="PredictorConfig",
full_name="PredictorConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="decorated_predictors",
full_name="PredictorConfig.decorated_predictors",
index=0,
number=10,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="avg_predictor",
full_name="PredictorConfig.avg_predictor",
index=1,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="max_predictor",
full_name="PredictorConfig.max_predictor",
index=2,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="avg_decorator",
full_name="PredictorConfig.avg_decorator",
index=3,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="max_decorator",
full_name="PredictorConfig.max_decorator",
index=4,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="per_vm_percentile_predictor",
full_name="PredictorConfig.per_vm_percentile_predictor",
index=5,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="n_sigma_predictor",
full_name="PredictorConfig.n_sigma_predictor",
index=6,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="limit_predictor",
full_name="PredictorConfig.limit_predictor",
index=7,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="per_machine_percentile_predictor",
full_name="PredictorConfig.per_machine_percentile_predictor",
index=8,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[
_PREDICTORCONFIG_AVGPREDICTORCONFIG,
_PREDICTORCONFIG_LIMITPREDICTORCONFIG,
_PREDICTORCONFIG_MAXPREDICTORCONFIG,
_PREDICTORCONFIG_PERVMPERCENTILECONFIG,
_PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG,
_PREDICTORCONFIG_NSIGMACONFIG,
_PREDICTORCONFIG_AVGDECORATORCONFIG,
_PREDICTORCONFIG_MAXDECORATORCONFIG,
],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="predictor",
full_name="PredictorConfig.predictor",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=853,
serialized_end=2065,
)
_FORTUNETELLERCONFIG_ORACLECONFIG = _descriptor.Descriptor(
name="OracleConfig",
full_name="FortuneTellerConfig.OracleConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="horizon_in_seconds",
full_name="FortuneTellerConfig.OracleConfig.horizon_in_seconds",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="cap_to_limit",
full_name="FortuneTellerConfig.OracleConfig.cap_to_limit",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="percentile",
full_name="FortuneTellerConfig.OracleConfig.percentile",
index=2,
number=3,
type=3,
cpp_type=2,
label=1,
has_default_value=True,
default_value=100,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=2219,
serialized_end=2308,
)
_FORTUNETELLERCONFIG = _descriptor.Descriptor(
name="FortuneTellerConfig",
full_name="FortuneTellerConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="FortuneTellerConfig.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="save_samples",
full_name="FortuneTellerConfig.save_samples",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="oracle",
full_name="FortuneTellerConfig.oracle",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="predictor",
full_name="FortuneTellerConfig.predictor",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[_FORTUNETELLERCONFIG_ORACLECONFIG,],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="teller",
full_name="FortuneTellerConfig.teller",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=2068,
serialized_end=2318,
)
_SIMULATIONCONFIG = _descriptor.Descriptor(
name="SimulationConfig",
full_name="SimulationConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="input",
full_name="SimulationConfig.input",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="filter",
full_name="SimulationConfig.filter",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="filtered_samples",
full_name="SimulationConfig.filtered_samples",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="time_aligned_samples",
full_name="SimulationConfig.time_aligned_samples",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="metric",
full_name="SimulationConfig.metric",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="samples_with_abstract_metrics",
full_name="SimulationConfig.samples_with_abstract_metrics",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="reset_and_shift",
full_name="SimulationConfig.reset_and_shift",
index=6,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="samples_with_reset_and_shift",
full_name="SimulationConfig.samples_with_reset_and_shift",
index=7,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="scheduler",
full_name="SimulationConfig.scheduler",
index=8,
number=9,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="scheduled_samples",
full_name="SimulationConfig.scheduled_samples",
index=9,
number=10,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="fortune_teller",
full_name="SimulationConfig.fortune_teller",
index=10,
number=11,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="simulation_result",
full_name="SimulationConfig.simulation_result",
index=11,
number=12,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=2321,
serialized_end=2827,
)
_VMFILTER.fields_by_name["priority_range"].message_type = _INT64RANGE
_VMFILTER.fields_by_name["scheduling_class_range"].message_type = _INT64RANGE
_LOADORWRITE.fields_by_name["input"].message_type = _DATALOCATION
_LOADORWRITE.fields_by_name["output"].message_type = _DATALOCATION
_LOADORWRITE.oneofs_by_name["load_or_write"].fields.append(
_LOADORWRITE.fields_by_name["input"]
)
_LOADORWRITE.fields_by_name["input"].containing_oneof = _LOADORWRITE.oneofs_by_name[
"load_or_write"
]
_LOADORWRITE.oneofs_by_name["load_or_write"].fields.append(
_LOADORWRITE.fields_by_name["output"]
)
_LOADORWRITE.fields_by_name["output"].containing_oneof = _LOADORWRITE.oneofs_by_name[
"load_or_write"
]
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
_ABSTRACTMETRICSELECTOR.fields_by_name["max_memory_usage"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
"max_memory_usage"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
_ABSTRACTMETRICSELECTOR.fields_by_name["cpu_usage_percentile"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
"cpu_usage_percentile"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
_ABSTRACTMETRICSELECTOR.fields_by_name["avg_cpu_usage"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
"avg_cpu_usage"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
_ABSTRACTMETRICSELECTOR.fields_by_name["avg_memory_usage"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
"avg_memory_usage"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
_ABSTRACTMETRICSELECTOR.fields_by_name["max_cpu_usage"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
"max_cpu_usage"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_RESETANDSHIFT.fields_by_name["random_shift"].message_type = _INT64RANGE
_SCHEDULER_ATRANDOM.containing_type = _SCHEDULER
_SCHEDULER.fields_by_name["at_random"].message_type = _SCHEDULER_ATRANDOM
_SCHEDULER.oneofs_by_name["scheduler"].fields.append(
_SCHEDULER.fields_by_name["at_random"]
)
_SCHEDULER.fields_by_name["at_random"].containing_oneof = _SCHEDULER.oneofs_by_name[
"scheduler"
]
_SCHEDULER.oneofs_by_name["scheduler"].fields.append(
_SCHEDULER.fields_by_name["by_machine_id"]
)
_SCHEDULER.fields_by_name["by_machine_id"].containing_oneof = _SCHEDULER.oneofs_by_name[
"scheduler"
]
_SCHEDULER.oneofs_by_name["scheduler"].fields.append(
_SCHEDULER.fields_by_name["by_vm_unique_id"]
)
_SCHEDULER.fields_by_name[
"by_vm_unique_id"
].containing_oneof = _SCHEDULER.oneofs_by_name["scheduler"]
_PREDICTORCONFIG_AVGPREDICTORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_LIMITPREDICTORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_MAXPREDICTORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_PERVMPERCENTILECONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_NSIGMACONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_AVGDECORATORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_MAXDECORATORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name["decorated_predictors"].message_type = _PREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name[
"avg_predictor"
].message_type = _PREDICTORCONFIG_AVGPREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name[
"max_predictor"
].message_type = _PREDICTORCONFIG_MAXPREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name[
"avg_decorator"
].message_type = _PREDICTORCONFIG_AVGDECORATORCONFIG
_PREDICTORCONFIG.fields_by_name[
"max_decorator"
].message_type = _PREDICTORCONFIG_MAXDECORATORCONFIG
_PREDICTORCONFIG.fields_by_name[
"per_vm_percentile_predictor"
].message_type = _PREDICTORCONFIG_PERVMPERCENTILECONFIG
_PREDICTORCONFIG.fields_by_name[
"n_sigma_predictor"
].message_type = _PREDICTORCONFIG_NSIGMACONFIG
_PREDICTORCONFIG.fields_by_name[
"limit_predictor"
].message_type = _PREDICTORCONFIG_LIMITPREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name[
"per_machine_percentile_predictor"
].message_type = _PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["avg_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
"avg_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["max_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
"max_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["avg_decorator"]
)
_PREDICTORCONFIG.fields_by_name[
"avg_decorator"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["max_decorator"]
)
_PREDICTORCONFIG.fields_by_name[
"max_decorator"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["per_vm_percentile_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
"per_vm_percentile_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["n_sigma_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
"n_sigma_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["limit_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
"limit_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["per_machine_percentile_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
"per_machine_percentile_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_FORTUNETELLERCONFIG_ORACLECONFIG.containing_type = _FORTUNETELLERCONFIG
_FORTUNETELLERCONFIG.fields_by_name[
"oracle"
].message_type = _FORTUNETELLERCONFIG_ORACLECONFIG
_FORTUNETELLERCONFIG.fields_by_name["predictor"].message_type = _PREDICTORCONFIG
_FORTUNETELLERCONFIG.oneofs_by_name["teller"].fields.append(
_FORTUNETELLERCONFIG.fields_by_name["oracle"]
)
_FORTUNETELLERCONFIG.fields_by_name[
"oracle"
].containing_oneof = _FORTUNETELLERCONFIG.oneofs_by_name["teller"]
_FORTUNETELLERCONFIG.oneofs_by_name["teller"].fields.append(
_FORTUNETELLERCONFIG.fields_by_name["predictor"]
)
_FORTUNETELLERCONFIG.fields_by_name[
"predictor"
].containing_oneof = _FORTUNETELLERCONFIG.oneofs_by_name["teller"]
_SIMULATIONCONFIG.fields_by_name["input"].message_type = _DATALOCATION
_SIMULATIONCONFIG.fields_by_name["filter"].message_type = _VMFILTER
_SIMULATIONCONFIG.fields_by_name["filtered_samples"].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["time_aligned_samples"].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["metric"].message_type = _ABSTRACTMETRICSELECTOR
_SIMULATIONCONFIG.fields_by_name[
"samples_with_abstract_metrics"
].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["reset_and_shift"].message_type = _RESETANDSHIFT
_SIMULATIONCONFIG.fields_by_name[
"samples_with_reset_and_shift"
].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["scheduler"].message_type = _SCHEDULER
_SIMULATIONCONFIG.fields_by_name["scheduled_samples"].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["fortune_teller"].message_type = _FORTUNETELLERCONFIG
_SIMULATIONCONFIG.fields_by_name["simulation_result"].message_type = _DATALOCATION
DESCRIPTOR.message_types_by_name["Int64Range"] = _INT64RANGE
DESCRIPTOR.message_types_by_name["DataLocation"] = _DATALOCATION
DESCRIPTOR.message_types_by_name["VMFilter"] = _VMFILTER
DESCRIPTOR.message_types_by_name["LoadOrWrite"] = _LOADORWRITE
DESCRIPTOR.message_types_by_name["AbstractMetricSelector"] = _ABSTRACTMETRICSELECTOR
DESCRIPTOR.message_types_by_name["ResetAndShift"] = _RESETANDSHIFT
DESCRIPTOR.message_types_by_name["Scheduler"] = _SCHEDULER
DESCRIPTOR.message_types_by_name["PredictorConfig"] = _PREDICTORCONFIG
DESCRIPTOR.message_types_by_name["FortuneTellerConfig"] = _FORTUNETELLERCONFIG
DESCRIPTOR.message_types_by_name["SimulationConfig"] = _SIMULATIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Int64Range = _reflection.GeneratedProtocolMessageType(
"Int64Range",
(_message.Message,),
{
"DESCRIPTOR": _INT64RANGE,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:Int64Range)
},
)
_sym_db.RegisterMessage(Int64Range)
DataLocation = _reflection.GeneratedProtocolMessageType(
"DataLocation",
(_message.Message,),
{
"DESCRIPTOR": _DATALOCATION,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:DataLocation)
},
)
_sym_db.RegisterMessage(DataLocation)
VMFilter = _reflection.GeneratedProtocolMessageType(
"VMFilter",
(_message.Message,),
{
"DESCRIPTOR": _VMFILTER,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:VMFilter)
},
)
_sym_db.RegisterMessage(VMFilter)
LoadOrWrite = _reflection.GeneratedProtocolMessageType(
"LoadOrWrite",
(_message.Message,),
{
"DESCRIPTOR": _LOADORWRITE,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:LoadOrWrite)
},
)
_sym_db.RegisterMessage(LoadOrWrite)
AbstractMetricSelector = _reflection.GeneratedProtocolMessageType(
"AbstractMetricSelector",
(_message.Message,),
{
"DESCRIPTOR": _ABSTRACTMETRICSELECTOR,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:AbstractMetricSelector)
},
)
_sym_db.RegisterMessage(AbstractMetricSelector)
ResetAndShift = _reflection.GeneratedProtocolMessageType(
"ResetAndShift",
(_message.Message,),
{
"DESCRIPTOR": _RESETANDSHIFT,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:ResetAndShift)
},
)
_sym_db.RegisterMessage(ResetAndShift)
Scheduler = _reflection.GeneratedProtocolMessageType(
"Scheduler",
(_message.Message,),
{
"AtRandom": _reflection.GeneratedProtocolMessageType(
"AtRandom",
(_message.Message,),
{
"DESCRIPTOR": _SCHEDULER_ATRANDOM,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:Scheduler.AtRandom)
},
),
"DESCRIPTOR": _SCHEDULER,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:Scheduler)
},
)
_sym_db.RegisterMessage(Scheduler)
_sym_db.RegisterMessage(Scheduler.AtRandom)
PredictorConfig = _reflection.GeneratedProtocolMessageType(
"PredictorConfig",
(_message.Message,),
{
"AvgPredictorConfig": _reflection.GeneratedProtocolMessageType(
"AvgPredictorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_AVGPREDICTORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.AvgPredictorConfig)
},
),
"LimitPredictorConfig": _reflection.GeneratedProtocolMessageType(
"LimitPredictorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_LIMITPREDICTORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.LimitPredictorConfig)
},
),
"MaxPredictorConfig": _reflection.GeneratedProtocolMessageType(
"MaxPredictorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_MAXPREDICTORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.MaxPredictorConfig)
},
),
"PerVMPercentileConfig": _reflection.GeneratedProtocolMessageType(
"PerVMPercentileConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_PERVMPERCENTILECONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.PerVMPercentileConfig)
},
),
"PerMachinePercentileConfig": _reflection.GeneratedProtocolMessageType(
"PerMachinePercentileConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.PerMachinePercentileConfig)
},
),
"NSigmaConfig": _reflection.GeneratedProtocolMessageType(
"NSigmaConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_NSIGMACONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.NSigmaConfig)
},
),
"AvgDecoratorConfig": _reflection.GeneratedProtocolMessageType(
"AvgDecoratorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_AVGDECORATORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.AvgDecoratorConfig)
},
),
"MaxDecoratorConfig": _reflection.GeneratedProtocolMessageType(
"MaxDecoratorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_MAXDECORATORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.MaxDecoratorConfig)
},
),
"DESCRIPTOR": _PREDICTORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig)
},
)
_sym_db.RegisterMessage(PredictorConfig)
_sym_db.RegisterMessage(PredictorConfig.AvgPredictorConfig)
_sym_db.RegisterMessage(PredictorConfig.LimitPredictorConfig)
_sym_db.RegisterMessage(PredictorConfig.MaxPredictorConfig)
_sym_db.RegisterMessage(PredictorConfig.PerVMPercentileConfig)
_sym_db.RegisterMessage(PredictorConfig.PerMachinePercentileConfig)
_sym_db.RegisterMessage(PredictorConfig.NSigmaConfig)
_sym_db.RegisterMessage(PredictorConfig.AvgDecoratorConfig)
_sym_db.RegisterMessage(PredictorConfig.MaxDecoratorConfig)
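# Editor's note (illustrative sketch, not part of the protoc output): the
# eight predictor variants registered above all belong to the `predictor`
# oneof, so setting one variant clears any previously set one, e.g.:
#
#   cfg = PredictorConfig()
#   cfg.max_predictor.min_num_samples = 5
#   cfg.avg_predictor.min_num_samples = 3      # switches the oneof
#   assert cfg.WhichOneof("predictor") == "avg_predictor"
#   assert not cfg.HasField("max_predictor")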
FortuneTellerConfig = _reflection.GeneratedProtocolMessageType(
"FortuneTellerConfig",
(_message.Message,),
{
"OracleConfig": _reflection.GeneratedProtocolMessageType(
"OracleConfig",
(_message.Message,),
{
"DESCRIPTOR": _FORTUNETELLERCONFIG_ORACLECONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:FortuneTellerConfig.OracleConfig)
},
),
"DESCRIPTOR": _FORTUNETELLERCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:FortuneTellerConfig)
},
)
_sym_db.RegisterMessage(FortuneTellerConfig)
_sym_db.RegisterMessage(FortuneTellerConfig.OracleConfig)
SimulationConfig = _reflection.GeneratedProtocolMessageType(
"SimulationConfig",
(_message.Message,),
{
"DESCRIPTOR": _SIMULATIONCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:SimulationConfig)
},
)
_sym_db.RegisterMessage(SimulationConfig)
# @@protoc_insertion_point(module_scope)
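# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the protoc output): a minimal example of
# how the generated message classes above might be populated and round-tripped
# through the wire format. Field names are taken from the descriptors defined
# in this module; the dataset/table strings are placeholders. The __main__
# guard keeps the sketch from running on import.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    config = SimulationConfig()
    config.input.dataset = "example_dataset"   # DataLocation.dataset (placeholder)
    config.input.table = "vm_samples"          # DataLocation.table (placeholder)
    config.filter.priority_range.lower_bound = 100  # VMFilter.priority_range

    teller = config.fortune_teller.add()       # repeated FortuneTellerConfig
    teller.name = "max_predictor"
    teller.predictor.max_predictor.min_num_samples = 10

    # Round-trip through the serialized form and check one field survived.
    data = config.SerializeToString()
    restored = SimulationConfig.FromString(data)
    assert restored.fortune_teller[0].name == "max_predictor"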
avg_line_length: 33.686607
max_line_length: 4812
alphanum_fraction: 0.620279
content_no_comment:
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="simulator/config.proto",
package="",
syntax="proto2",
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x16simulator/config.proto"6\n\nInt64Range\x12\x13\n\x0blower_bound\x18\x01 \x01(\x03\x12\x13\n\x0bupper_bound\x18\x02 \x01(\x03".\n\x0c\x44\x61taLocation\x12\x0f\n\x07\x64\x61taset\x18\x01 \x01(\t\x12\r\n\x05table\x18\x02 \x01(\t"\xb8\x01\n\x08VMFilter\x12\x12\n\nstart_time\x18\x01 \x01(\x03\x12\x10\n\x08\x65nd_time\x18\x02 \x01(\x03\x12 \n\x18remove_non_top_level_vms\x18\x03 \x01(\x08\x12#\n\x0epriority_range\x18\x04 \x01(\x0b\x32\x0b.Int64Range\x12+\n\x16scheduling_class_range\x18\x05 \x01(\x0b\x32\x0b.Int64Range\x12\x12\n\nmachine_id\x18\x06 \x03(\x03"_\n\x0bLoadOrWrite\x12\x1e\n\x05input\x18\x01 \x01(\x0b\x32\r.DataLocationH\x00\x12\x1f\n\x06output\x18\x02 \x01(\x0b\x32\r.DataLocationH\x00\x42\x0f\n\rload_or_write"\xac\x01\n\x16\x41\x62stractMetricSelector\x12\x1a\n\x10max_memory_usage\x18\x01 \x01(\x08H\x00\x12\x1e\n\x14\x63pu_usage_percentile\x18\x02 \x01(\x03H\x00\x12\x17\n\ravg_cpu_usage\x18\x03 \x01(\x08H\x00\x12\x1a\n\x10\x61vg_memory_usage\x18\x04 \x01(\x08H\x00\x12\x17\n\rmax_cpu_usage\x18\x05 \x01(\x08H\x00\x42\x08\n\x06metric"\\\n\rResetAndShift\x12\x1a\n\x12reset_time_to_zero\x18\x01 \x01(\x08\x12!\n\x0crandom_shift\x18\x02 \x01(\x0b\x32\x0b.Int64Range\x12\x0c\n\x04seed\x18\x03 \x01(\x03"\xa6\x01\n\tScheduler\x12(\n\tat_random\x18\x01 \x01(\x0b\x32\x13.Scheduler.AtRandomH\x00\x12\x17\n\rby_machine_id\x18\x02 \x01(\x08H\x00\x12\x19\n\x0f\x62y_vm_unique_id\x18\x03 \x01(\x08H\x00\x1a.\n\x08\x41tRandom\x12\x14\n\x0cnum_machines\x18\x01 \x01(\x03\x12\x0c\n\x04seed\x18\x02 \x01(\x03\x42\x0b\n\tscheduler"\xbc\t\n\x0fPredictorConfig\x12.\n\x14\x64\x65\x63orated_predictors\x18\n \x03(\x0b\x32\x10.PredictorConfig\x12<\n\ravg_predictor\x18\x01 \x01(\x0b\x32#.PredictorConfig.AvgPredictorConfigH\x00\x12<\n\rmax_predictor\x18\x02 \x01(\x0b\x32#.PredictorConfig.MaxPredictorConfigH\x00\x12<\n\ravg_decorator\x18\x03 \x01(\x0b\x32#.PredictorConfig.AvgDecoratorConfigH\x00\x12<\n\rmax_decorator\x18\x04 \x01(\x0b\x32#.PredictorConfig.MaxDecoratorConfigH\x00\x12M\n\x1bper_vm_percentile_predictor\x18\x05 \x01(\x0b\x32&.PredictorConfig.PerVMPercentileConfigH\x00\x12:\n\x11n_sigma_predictor\x18\x06 \x01(\x0b\x32\x1d.PredictorConfig.NSigmaConfigH\x00\x12@\n\x0flimit_predictor\x18\x07 \x01(\x0b\x32%.PredictorConfig.LimitPredictorConfigH\x00\x12W\n per_machine_percentile_predictor\x18\x08 \x01(\x0b\x32+.PredictorConfig.PerMachinePercentileConfigH\x00\x1a\x43\n\x12\x41vgPredictorConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x1a/\n\x14LimitPredictorConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x1a\x43\n\x12MaxPredictorConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x1a|\n\x15PerVMPercentileConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x12\x17\n\npercentile\x18\x03 \x01(\x01:\x03\x31\x30\x30\x12\x1b\n\x13num_history_samples\x18\x04 \x01(\x03\x1a\x81\x01\n\x1aPerMachinePercentileConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x12\x17\n\npercentile\x18\x03 \x01(\x01:\x03\x31\x30\x30\x12\x1b\n\x13num_history_samples\x18\x04 \x01(\x03\x1a\x65\n\x0cNSigmaConfig\x12\x17\n\x0fmin_num_samples\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x12\x1b\n\x13num_history_samples\x18\x03 \x01(\x03\x12\t\n\x01n\x18\x04 
\x01(\x03\x1a\x14\n\x12\x41vgDecoratorConfig\x1a\x14\n\x12MaxDecoratorConfigB\x0b\n\tpredictor"\xfa\x01\n\x13\x46ortuneTellerConfig\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0csave_samples\x18\x02 \x01(\x08\x12\x33\n\x06oracle\x18\x03 \x01(\x0b\x32!.FortuneTellerConfig.OracleConfigH\x00\x12%\n\tpredictor\x18\x04 \x01(\x0b\x32\x10.PredictorConfigH\x00\x1aY\n\x0cOracleConfig\x12\x1a\n\x12horizon_in_seconds\x18\x01 \x01(\x03\x12\x14\n\x0c\x63\x61p_to_limit\x18\x02 \x01(\x08\x12\x17\n\npercentile\x18\x03 \x01(\x03:\x03\x31\x30\x30\x42\x08\n\x06teller"\xfa\x03\n\x10SimulationConfig\x12\x1c\n\x05input\x18\x01 \x01(\x0b\x32\r.DataLocation\x12\x19\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\t.VMFilter\x12&\n\x10\x66iltered_samples\x18\x03 \x01(\x0b\x32\x0c.LoadOrWrite\x12*\n\x14time_aligned_samples\x18\x04 \x01(\x0b\x32\x0c.LoadOrWrite\x12\'\n\x06metric\x18\x05 \x01(\x0b\x32\x17.AbstractMetricSelector\x12\x33\n\x1dsamples_with_abstract_metrics\x18\x06 \x01(\x0b\x32\x0c.LoadOrWrite\x12\'\n\x0freset_and_shift\x18\x07 \x01(\x0b\x32\x0e.ResetAndShift\x12\x32\n\x1csamples_with_reset_and_shift\x18\x08 \x01(\x0b\x32\x0c.LoadOrWrite\x12\x1d\n\tscheduler\x18\t \x01(\x0b\x32\n.Scheduler\x12\'\n\x11scheduled_samples\x18\n \x01(\x0b\x32\x0c.LoadOrWrite\x12,\n\x0e\x66ortune_teller\x18\x0b \x03(\x0b\x32\x14.FortuneTellerConfig\x12(\n\x11simulation_result\x18\x0c \x01(\x0b\x32\r.DataLocation',
)
_INT64RANGE = _descriptor.Descriptor(
name="Int64Range",
full_name="Int64Range",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="lower_bound",
full_name="Int64Range.lower_bound",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="upper_bound",
full_name="Int64Range.upper_bound",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=26,
serialized_end=80,
)
_DATALOCATION = _descriptor.Descriptor(
name="DataLocation",
full_name="DataLocation",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="dataset",
full_name="DataLocation.dataset",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="table",
full_name="DataLocation.table",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=82,
serialized_end=128,
)
_VMFILTER = _descriptor.Descriptor(
name="VMFilter",
full_name="VMFilter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="start_time",
full_name="VMFilter.start_time",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="VMFilter.end_time",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="remove_non_top_level_vms",
full_name="VMFilter.remove_non_top_level_vms",
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="priority_range",
full_name="VMFilter.priority_range",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="scheduling_class_range",
full_name="VMFilter.scheduling_class_range",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="machine_id",
full_name="VMFilter.machine_id",
index=5,
number=6,
type=3,
cpp_type=2,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=131,
serialized_end=315,
)
_LOADORWRITE = _descriptor.Descriptor(
name="LoadOrWrite",
full_name="LoadOrWrite",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="input",
full_name="LoadOrWrite.input",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="output",
full_name="LoadOrWrite.output",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="load_or_write",
full_name="LoadOrWrite.load_or_write",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=317,
serialized_end=412,
)
_ABSTRACTMETRICSELECTOR = _descriptor.Descriptor(
name="AbstractMetricSelector",
full_name="AbstractMetricSelector",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="max_memory_usage",
full_name="AbstractMetricSelector.max_memory_usage",
index=0,
number=1,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="cpu_usage_percentile",
full_name="AbstractMetricSelector.cpu_usage_percentile",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="avg_cpu_usage",
full_name="AbstractMetricSelector.avg_cpu_usage",
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="avg_memory_usage",
full_name="AbstractMetricSelector.avg_memory_usage",
index=3,
number=4,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="max_cpu_usage",
full_name="AbstractMetricSelector.max_cpu_usage",
index=4,
number=5,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="metric",
full_name="AbstractMetricSelector.metric",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=415,
serialized_end=587,
)
_RESETANDSHIFT = _descriptor.Descriptor(
name="ResetAndShift",
full_name="ResetAndShift",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="reset_time_to_zero",
full_name="ResetAndShift.reset_time_to_zero",
index=0,
number=1,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="random_shift",
full_name="ResetAndShift.random_shift",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="seed",
full_name="ResetAndShift.seed",
index=2,
number=3,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=589,
serialized_end=681,
)
_SCHEDULER_ATRANDOM = _descriptor.Descriptor(
name="AtRandom",
full_name="Scheduler.AtRandom",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="num_machines",
full_name="Scheduler.AtRandom.num_machines",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="seed",
full_name="Scheduler.AtRandom.seed",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=791,
serialized_end=837,
)
_SCHEDULER = _descriptor.Descriptor(
name="Scheduler",
full_name="Scheduler",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="at_random",
full_name="Scheduler.at_random",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="by_machine_id",
full_name="Scheduler.by_machine_id",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="by_vm_unique_id",
full_name="Scheduler.by_vm_unique_id",
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[_SCHEDULER_ATRANDOM,],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="scheduler",
full_name="Scheduler.scheduler",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=684,
serialized_end=850,
)
_PREDICTORCONFIG_AVGPREDICTORCONFIG = _descriptor.Descriptor(
name="AvgPredictorConfig",
full_name="PredictorConfig.AvgPredictorConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="min_num_samples",
full_name="PredictorConfig.AvgPredictorConfig.min_num_samples",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="cap_to_limit",
full_name="PredictorConfig.AvgPredictorConfig.cap_to_limit",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=1462,
serialized_end=1529,
)
_PREDICTORCONFIG_LIMITPREDICTORCONFIG = _descriptor.Descriptor(
name="LimitPredictorConfig",
full_name="PredictorConfig.LimitPredictorConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="min_num_samples",
full_name="PredictorConfig.LimitPredictorConfig.min_num_samples",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=1531,
serialized_end=1578,
)
_PREDICTORCONFIG_MAXPREDICTORCONFIG = _descriptor.Descriptor(
name="MaxPredictorConfig",
full_name="PredictorConfig.MaxPredictorConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="min_num_samples",
full_name="PredictorConfig.MaxPredictorConfig.min_num_samples",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="cap_to_limit",
full_name="PredictorConfig.MaxPredictorConfig.cap_to_limit",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=1580,
serialized_end=1647,
)
_PREDICTORCONFIG_PERVMPERCENTILECONFIG = _descriptor.Descriptor(
name="PerVMPercentileConfig",
full_name="PredictorConfig.PerVMPercentileConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="min_num_samples",
full_name="PredictorConfig.PerVMPercentileConfig.min_num_samples",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="cap_to_limit",
full_name="PredictorConfig.PerVMPercentileConfig.cap_to_limit",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="percentile",
full_name="PredictorConfig.PerVMPercentileConfig.percentile",
index=2,
number=3,
type=1,
cpp_type=5,
label=1,
has_default_value=True,
default_value=float(100),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="num_history_samples",
full_name="PredictorConfig.PerVMPercentileConfig.num_history_samples",
index=3,
number=4,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=1649,
serialized_end=1773,
)
_PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG = _descriptor.Descriptor(
name="PerMachinePercentileConfig",
full_name="PredictorConfig.PerMachinePercentileConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="min_num_samples",
full_name="PredictorConfig.PerMachinePercentileConfig.min_num_samples",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="cap_to_limit",
full_name="PredictorConfig.PerMachinePercentileConfig.cap_to_limit",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="percentile",
full_name="PredictorConfig.PerMachinePercentileConfig.percentile",
index=2,
number=3,
type=1,
cpp_type=5,
label=1,
has_default_value=True,
default_value=float(100),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="num_history_samples",
full_name="PredictorConfig.PerMachinePercentileConfig.num_history_samples",
index=3,
number=4,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=1776,
serialized_end=1905,
)
_PREDICTORCONFIG_NSIGMACONFIG = _descriptor.Descriptor(
name="NSigmaConfig",
full_name="PredictorConfig.NSigmaConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="min_num_samples",
full_name="PredictorConfig.NSigmaConfig.min_num_samples",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="cap_to_limit",
full_name="PredictorConfig.NSigmaConfig.cap_to_limit",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="num_history_samples",
full_name="PredictorConfig.NSigmaConfig.num_history_samples",
index=2,
number=3,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="n",
full_name="PredictorConfig.NSigmaConfig.n",
index=3,
number=4,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=1907,
serialized_end=2008,
)
_PREDICTORCONFIG_AVGDECORATORCONFIG = _descriptor.Descriptor(
name="AvgDecoratorConfig",
full_name="PredictorConfig.AvgDecoratorConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=2010,
serialized_end=2030,
)
_PREDICTORCONFIG_MAXDECORATORCONFIG = _descriptor.Descriptor(
name="MaxDecoratorConfig",
full_name="PredictorConfig.MaxDecoratorConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=2032,
serialized_end=2052,
)
_PREDICTORCONFIG = _descriptor.Descriptor(
name="PredictorConfig",
full_name="PredictorConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="decorated_predictors",
full_name="PredictorConfig.decorated_predictors",
index=0,
number=10,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="avg_predictor",
full_name="PredictorConfig.avg_predictor",
index=1,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="max_predictor",
full_name="PredictorConfig.max_predictor",
index=2,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="avg_decorator",
full_name="PredictorConfig.avg_decorator",
index=3,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="max_decorator",
full_name="PredictorConfig.max_decorator",
index=4,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="per_vm_percentile_predictor",
full_name="PredictorConfig.per_vm_percentile_predictor",
index=5,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="n_sigma_predictor",
full_name="PredictorConfig.n_sigma_predictor",
index=6,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="limit_predictor",
full_name="PredictorConfig.limit_predictor",
index=7,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="per_machine_percentile_predictor",
full_name="PredictorConfig.per_machine_percentile_predictor",
index=8,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[
_PREDICTORCONFIG_AVGPREDICTORCONFIG,
_PREDICTORCONFIG_LIMITPREDICTORCONFIG,
_PREDICTORCONFIG_MAXPREDICTORCONFIG,
_PREDICTORCONFIG_PERVMPERCENTILECONFIG,
_PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG,
_PREDICTORCONFIG_NSIGMACONFIG,
_PREDICTORCONFIG_AVGDECORATORCONFIG,
_PREDICTORCONFIG_MAXDECORATORCONFIG,
],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="predictor",
full_name="PredictorConfig.predictor",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=853,
serialized_end=2065,
)
_FORTUNETELLERCONFIG_ORACLECONFIG = _descriptor.Descriptor(
name="OracleConfig",
full_name="FortuneTellerConfig.OracleConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="horizon_in_seconds",
full_name="FortuneTellerConfig.OracleConfig.horizon_in_seconds",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="cap_to_limit",
full_name="FortuneTellerConfig.OracleConfig.cap_to_limit",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="percentile",
full_name="FortuneTellerConfig.OracleConfig.percentile",
index=2,
number=3,
type=3,
cpp_type=2,
label=1,
has_default_value=True,
default_value=100,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=2219,
serialized_end=2308,
)
_FORTUNETELLERCONFIG = _descriptor.Descriptor(
name="FortuneTellerConfig",
full_name="FortuneTellerConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="FortuneTellerConfig.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="save_samples",
full_name="FortuneTellerConfig.save_samples",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="oracle",
full_name="FortuneTellerConfig.oracle",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="predictor",
full_name="FortuneTellerConfig.predictor",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[_FORTUNETELLERCONFIG_ORACLECONFIG,],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="teller",
full_name="FortuneTellerConfig.teller",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=2068,
serialized_end=2318,
)
_SIMULATIONCONFIG = _descriptor.Descriptor(
name="SimulationConfig",
full_name="SimulationConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="input",
full_name="SimulationConfig.input",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="filter",
full_name="SimulationConfig.filter",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="filtered_samples",
full_name="SimulationConfig.filtered_samples",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="time_aligned_samples",
full_name="SimulationConfig.time_aligned_samples",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="metric",
full_name="SimulationConfig.metric",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="samples_with_abstract_metrics",
full_name="SimulationConfig.samples_with_abstract_metrics",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="reset_and_shift",
full_name="SimulationConfig.reset_and_shift",
index=6,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="samples_with_reset_and_shift",
full_name="SimulationConfig.samples_with_reset_and_shift",
index=7,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="scheduler",
full_name="SimulationConfig.scheduler",
index=8,
number=9,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="scheduled_samples",
full_name="SimulationConfig.scheduled_samples",
index=9,
number=10,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="fortune_teller",
full_name="SimulationConfig.fortune_teller",
index=10,
number=11,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="simulation_result",
full_name="SimulationConfig.simulation_result",
index=11,
number=12,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=2321,
serialized_end=2827,
)
_VMFILTER.fields_by_name["priority_range"].message_type = _INT64RANGE
_VMFILTER.fields_by_name["scheduling_class_range"].message_type = _INT64RANGE
_LOADORWRITE.fields_by_name["input"].message_type = _DATALOCATION
_LOADORWRITE.fields_by_name["output"].message_type = _DATALOCATION
_LOADORWRITE.oneofs_by_name["load_or_write"].fields.append(
_LOADORWRITE.fields_by_name["input"]
)
_LOADORWRITE.fields_by_name["input"].containing_oneof = _LOADORWRITE.oneofs_by_name[
"load_or_write"
]
_LOADORWRITE.oneofs_by_name["load_or_write"].fields.append(
_LOADORWRITE.fields_by_name["output"]
)
_LOADORWRITE.fields_by_name["output"].containing_oneof = _LOADORWRITE.oneofs_by_name[
"load_or_write"
]
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
_ABSTRACTMETRICSELECTOR.fields_by_name["max_memory_usage"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
"max_memory_usage"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
_ABSTRACTMETRICSELECTOR.fields_by_name["cpu_usage_percentile"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
"cpu_usage_percentile"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
_ABSTRACTMETRICSELECTOR.fields_by_name["avg_cpu_usage"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
"avg_cpu_usage"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
_ABSTRACTMETRICSELECTOR.fields_by_name["avg_memory_usage"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
"avg_memory_usage"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"].fields.append(
_ABSTRACTMETRICSELECTOR.fields_by_name["max_cpu_usage"]
)
_ABSTRACTMETRICSELECTOR.fields_by_name[
"max_cpu_usage"
].containing_oneof = _ABSTRACTMETRICSELECTOR.oneofs_by_name["metric"]
_RESETANDSHIFT.fields_by_name["random_shift"].message_type = _INT64RANGE
_SCHEDULER_ATRANDOM.containing_type = _SCHEDULER
_SCHEDULER.fields_by_name["at_random"].message_type = _SCHEDULER_ATRANDOM
_SCHEDULER.oneofs_by_name["scheduler"].fields.append(
_SCHEDULER.fields_by_name["at_random"]
)
_SCHEDULER.fields_by_name["at_random"].containing_oneof = _SCHEDULER.oneofs_by_name[
"scheduler"
]
_SCHEDULER.oneofs_by_name["scheduler"].fields.append(
_SCHEDULER.fields_by_name["by_machine_id"]
)
_SCHEDULER.fields_by_name["by_machine_id"].containing_oneof = _SCHEDULER.oneofs_by_name[
"scheduler"
]
_SCHEDULER.oneofs_by_name["scheduler"].fields.append(
_SCHEDULER.fields_by_name["by_vm_unique_id"]
)
_SCHEDULER.fields_by_name[
"by_vm_unique_id"
].containing_oneof = _SCHEDULER.oneofs_by_name["scheduler"]
_PREDICTORCONFIG_AVGPREDICTORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_LIMITPREDICTORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_MAXPREDICTORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_PERVMPERCENTILECONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_NSIGMACONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_AVGDECORATORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG_MAXDECORATORCONFIG.containing_type = _PREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name["decorated_predictors"].message_type = _PREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name[
"avg_predictor"
].message_type = _PREDICTORCONFIG_AVGPREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name[
"max_predictor"
].message_type = _PREDICTORCONFIG_MAXPREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name[
"avg_decorator"
].message_type = _PREDICTORCONFIG_AVGDECORATORCONFIG
_PREDICTORCONFIG.fields_by_name[
"max_decorator"
].message_type = _PREDICTORCONFIG_MAXDECORATORCONFIG
_PREDICTORCONFIG.fields_by_name[
"per_vm_percentile_predictor"
].message_type = _PREDICTORCONFIG_PERVMPERCENTILECONFIG
_PREDICTORCONFIG.fields_by_name[
"n_sigma_predictor"
].message_type = _PREDICTORCONFIG_NSIGMACONFIG
_PREDICTORCONFIG.fields_by_name[
"limit_predictor"
].message_type = _PREDICTORCONFIG_LIMITPREDICTORCONFIG
_PREDICTORCONFIG.fields_by_name[
"per_machine_percentile_predictor"
].message_type = _PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["avg_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
"avg_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["max_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
"max_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["avg_decorator"]
)
_PREDICTORCONFIG.fields_by_name[
"avg_decorator"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["max_decorator"]
)
_PREDICTORCONFIG.fields_by_name[
"max_decorator"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["per_vm_percentile_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
"per_vm_percentile_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["n_sigma_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
"n_sigma_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["limit_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
"limit_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_PREDICTORCONFIG.oneofs_by_name["predictor"].fields.append(
_PREDICTORCONFIG.fields_by_name["per_machine_percentile_predictor"]
)
_PREDICTORCONFIG.fields_by_name[
"per_machine_percentile_predictor"
].containing_oneof = _PREDICTORCONFIG.oneofs_by_name["predictor"]
_FORTUNETELLERCONFIG_ORACLECONFIG.containing_type = _FORTUNETELLERCONFIG
_FORTUNETELLERCONFIG.fields_by_name[
"oracle"
].message_type = _FORTUNETELLERCONFIG_ORACLECONFIG
_FORTUNETELLERCONFIG.fields_by_name["predictor"].message_type = _PREDICTORCONFIG
_FORTUNETELLERCONFIG.oneofs_by_name["teller"].fields.append(
_FORTUNETELLERCONFIG.fields_by_name["oracle"]
)
_FORTUNETELLERCONFIG.fields_by_name[
"oracle"
].containing_oneof = _FORTUNETELLERCONFIG.oneofs_by_name["teller"]
_FORTUNETELLERCONFIG.oneofs_by_name["teller"].fields.append(
_FORTUNETELLERCONFIG.fields_by_name["predictor"]
)
_FORTUNETELLERCONFIG.fields_by_name[
"predictor"
].containing_oneof = _FORTUNETELLERCONFIG.oneofs_by_name["teller"]
_SIMULATIONCONFIG.fields_by_name["input"].message_type = _DATALOCATION
_SIMULATIONCONFIG.fields_by_name["filter"].message_type = _VMFILTER
_SIMULATIONCONFIG.fields_by_name["filtered_samples"].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["time_aligned_samples"].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["metric"].message_type = _ABSTRACTMETRICSELECTOR
_SIMULATIONCONFIG.fields_by_name[
"samples_with_abstract_metrics"
].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["reset_and_shift"].message_type = _RESETANDSHIFT
_SIMULATIONCONFIG.fields_by_name[
"samples_with_reset_and_shift"
].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["scheduler"].message_type = _SCHEDULER
_SIMULATIONCONFIG.fields_by_name["scheduled_samples"].message_type = _LOADORWRITE
_SIMULATIONCONFIG.fields_by_name["fortune_teller"].message_type = _FORTUNETELLERCONFIG
_SIMULATIONCONFIG.fields_by_name["simulation_result"].message_type = _DATALOCATION
DESCRIPTOR.message_types_by_name["Int64Range"] = _INT64RANGE
DESCRIPTOR.message_types_by_name["DataLocation"] = _DATALOCATION
DESCRIPTOR.message_types_by_name["VMFilter"] = _VMFILTER
DESCRIPTOR.message_types_by_name["LoadOrWrite"] = _LOADORWRITE
DESCRIPTOR.message_types_by_name["AbstractMetricSelector"] = _ABSTRACTMETRICSELECTOR
DESCRIPTOR.message_types_by_name["ResetAndShift"] = _RESETANDSHIFT
DESCRIPTOR.message_types_by_name["Scheduler"] = _SCHEDULER
DESCRIPTOR.message_types_by_name["PredictorConfig"] = _PREDICTORCONFIG
DESCRIPTOR.message_types_by_name["FortuneTellerConfig"] = _FORTUNETELLERCONFIG
DESCRIPTOR.message_types_by_name["SimulationConfig"] = _SIMULATIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Int64Range = _reflection.GeneratedProtocolMessageType(
"Int64Range",
(_message.Message,),
{
"DESCRIPTOR": _INT64RANGE,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:Int64Range)
},
)
_sym_db.RegisterMessage(Int64Range)
DataLocation = _reflection.GeneratedProtocolMessageType(
"DataLocation",
(_message.Message,),
{
"DESCRIPTOR": _DATALOCATION,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:DataLocation)
},
)
_sym_db.RegisterMessage(DataLocation)
VMFilter = _reflection.GeneratedProtocolMessageType(
"VMFilter",
(_message.Message,),
{
"DESCRIPTOR": _VMFILTER,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:VMFilter)
},
)
_sym_db.RegisterMessage(VMFilter)
LoadOrWrite = _reflection.GeneratedProtocolMessageType(
"LoadOrWrite",
(_message.Message,),
{
"DESCRIPTOR": _LOADORWRITE,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:LoadOrWrite)
},
)
_sym_db.RegisterMessage(LoadOrWrite)
AbstractMetricSelector = _reflection.GeneratedProtocolMessageType(
"AbstractMetricSelector",
(_message.Message,),
{
"DESCRIPTOR": _ABSTRACTMETRICSELECTOR,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:AbstractMetricSelector)
},
)
_sym_db.RegisterMessage(AbstractMetricSelector)
ResetAndShift = _reflection.GeneratedProtocolMessageType(
"ResetAndShift",
(_message.Message,),
{
"DESCRIPTOR": _RESETANDSHIFT,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:ResetAndShift)
},
)
_sym_db.RegisterMessage(ResetAndShift)
Scheduler = _reflection.GeneratedProtocolMessageType(
"Scheduler",
(_message.Message,),
{
"AtRandom": _reflection.GeneratedProtocolMessageType(
"AtRandom",
(_message.Message,),
{
"DESCRIPTOR": _SCHEDULER_ATRANDOM,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:Scheduler.AtRandom)
},
),
"DESCRIPTOR": _SCHEDULER,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:Scheduler)
},
)
_sym_db.RegisterMessage(Scheduler)
_sym_db.RegisterMessage(Scheduler.AtRandom)
PredictorConfig = _reflection.GeneratedProtocolMessageType(
"PredictorConfig",
(_message.Message,),
{
"AvgPredictorConfig": _reflection.GeneratedProtocolMessageType(
"AvgPredictorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_AVGPREDICTORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.AvgPredictorConfig)
},
),
"LimitPredictorConfig": _reflection.GeneratedProtocolMessageType(
"LimitPredictorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_LIMITPREDICTORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.LimitPredictorConfig)
},
),
"MaxPredictorConfig": _reflection.GeneratedProtocolMessageType(
"MaxPredictorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_MAXPREDICTORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.MaxPredictorConfig)
},
),
"PerVMPercentileConfig": _reflection.GeneratedProtocolMessageType(
"PerVMPercentileConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_PERVMPERCENTILECONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.PerVMPercentileConfig)
},
),
"PerMachinePercentileConfig": _reflection.GeneratedProtocolMessageType(
"PerMachinePercentileConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_PERMACHINEPERCENTILECONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.PerMachinePercentileConfig)
},
),
"NSigmaConfig": _reflection.GeneratedProtocolMessageType(
"NSigmaConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_NSIGMACONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.NSigmaConfig)
},
),
"AvgDecoratorConfig": _reflection.GeneratedProtocolMessageType(
"AvgDecoratorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_AVGDECORATORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.AvgDecoratorConfig)
},
),
"MaxDecoratorConfig": _reflection.GeneratedProtocolMessageType(
"MaxDecoratorConfig",
(_message.Message,),
{
"DESCRIPTOR": _PREDICTORCONFIG_MAXDECORATORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig.MaxDecoratorConfig)
},
),
"DESCRIPTOR": _PREDICTORCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:PredictorConfig)
},
)
_sym_db.RegisterMessage(PredictorConfig)
_sym_db.RegisterMessage(PredictorConfig.AvgPredictorConfig)
_sym_db.RegisterMessage(PredictorConfig.LimitPredictorConfig)
_sym_db.RegisterMessage(PredictorConfig.MaxPredictorConfig)
_sym_db.RegisterMessage(PredictorConfig.PerVMPercentileConfig)
_sym_db.RegisterMessage(PredictorConfig.PerMachinePercentileConfig)
_sym_db.RegisterMessage(PredictorConfig.NSigmaConfig)
_sym_db.RegisterMessage(PredictorConfig.AvgDecoratorConfig)
_sym_db.RegisterMessage(PredictorConfig.MaxDecoratorConfig)
FortuneTellerConfig = _reflection.GeneratedProtocolMessageType(
"FortuneTellerConfig",
(_message.Message,),
{
"OracleConfig": _reflection.GeneratedProtocolMessageType(
"OracleConfig",
(_message.Message,),
{
"DESCRIPTOR": _FORTUNETELLERCONFIG_ORACLECONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:FortuneTellerConfig.OracleConfig)
},
),
"DESCRIPTOR": _FORTUNETELLERCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:FortuneTellerConfig)
},
)
_sym_db.RegisterMessage(FortuneTellerConfig)
_sym_db.RegisterMessage(FortuneTellerConfig.OracleConfig)
SimulationConfig = _reflection.GeneratedProtocolMessageType(
"SimulationConfig",
(_message.Message,),
{
"DESCRIPTOR": _SIMULATIONCONFIG,
"__module__": "simulator.config_pb2"
# @@protoc_insertion_point(class_scope:SimulationConfig)
},
)
_sym_db.RegisterMessage(SimulationConfig)
# @@protoc_insertion_point(module_scope)
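# Minimal usage sketch for the generated messages above. Assumptions: the
# module is importable as simulator.config_pb2 (per the __module__ strings)
# and only field names visible in the descriptors are set.
def _example_simulation_config():
    from simulator import config_pb2
    cfg = config_pb2.SimulationConfig()
    # fortune_teller is a repeated FortuneTellerConfig field (label=3)
    teller = cfg.fortune_teller.add()
    teller.name = "oracle-24h"
    # assigning into 'oracle' selects that arm of the 'teller' oneof
    teller.oracle.horizon_in_seconds = 24 * 3600
    teller.oracle.cap_to_limit = True
    # standard protobuf round-trip
    data = cfg.SerializeToString()
    return config_pb2.SimulationConfig.FromString(data)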
| true
| true
|
1c464d12c804104184ab9202416708560155519f
| 1,270
|
py
|
Python
|
packages/pyre/weaver/MixedComments.py
|
PyreFramework/pyre
|
345c7449a3416eea1c1affa74fb32faff30a6aaa
|
[
"BSD-3-Clause"
] | null | null | null |
packages/pyre/weaver/MixedComments.py
|
PyreFramework/pyre
|
345c7449a3416eea1c1affa74fb32faff30a6aaa
|
[
"BSD-3-Clause"
] | null | null | null |
packages/pyre/weaver/MixedComments.py
|
PyreFramework/pyre
|
345c7449a3416eea1c1affa74fb32faff30a6aaa
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2022 all rights reserved
#
class MixedComments:
"""
The mixed commenting strategy: both a block marker pair and an individual line marker
"""
# implemented interface
def commentBlock(self, lines):
"""
Create a comment block out of the given {lines}
"""
# build the leader
leader = self.leader + self.commentMarker
# place the start comment block marker
yield self.leader + self.startBlock
# iterate over the {lines}
for line in lines:
# and render each one
yield leader + ' ' + line
# place the end comment block marker
        yield self.leader + ' ' + self.endBlock
# all done
return
def commentLine(self, line):
"""
Mark {line} as a comment
"""
# build the leader
leader = self.leader + self.commentMarker
# if the line is non-empty
if line:
# mark it
return leader + ' ' + line
# otherwise, just return the comment characters
return leader
# private data
endBlock = None
startBlock = None
commentMarker = None
# end of file
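# A minimal usage sketch of the mixin above. The CStyleComments name and the
# marker values are hypothetical; real weavers supply their own leader,
# commentMarker, startBlock and endBlock.
class CStyleComments(MixedComments):
    leader = ''
    commentMarker = '//'
    startBlock = '/*'
    endBlock = '*/'
# list(CStyleComments().commentBlock(['first', 'second']))
#   -> ['/*', '// first', '// second', ' */']
# CStyleComments().commentLine('a remark')
#   -> '// a remark'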
| 22.678571
| 89
| 0.568504
|
class MixedComments:
def commentBlock(self, lines):
leader = self.leader + self.commentMarker
yield self.leader + self.startBlock
for line in lines:
yield leader + ' ' + line
        yield self.leader + ' ' + self.endBlock
return
def commentLine(self, line):
leader = self.leader + self.commentMarker
if line:
return leader + ' ' + line
return leader
endBlock = None
startBlock = None
commentMarker = None
| true
| true
|
1c464e80baae8523873eba7c28b31180433a9491
| 244
|
py
|
Python
|
accounts/templatetags/account_tags.py
|
GadirMirzayev/Django-E-commerce
|
0ca289fdf584b29636a8fc9416319defad0be5a5
|
[
"MIT"
] | 1
|
2021-08-20T07:44:39.000Z
|
2021-08-20T07:44:39.000Z
|
accounts/templatetags/account_tags.py
|
GadirMirzayev/Django-E-commerce
|
0ca289fdf584b29636a8fc9416319defad0be5a5
|
[
"MIT"
] | null | null | null |
accounts/templatetags/account_tags.py
|
GadirMirzayev/Django-E-commerce
|
0ca289fdf584b29636a8fc9416319defad0be5a5
|
[
"MIT"
] | null | null | null |
from django.template import Library
from accounts.forms import LoginForm, RegistrationForm
register = Library()
@register.simple_tag
def get_login():
return LoginForm
@register.simple_tag
def get_register():
return RegistrationForm
| 17.428571
| 54
| 0.795082
|
from django.template import Library
from accounts.forms import LoginForm, RegistrationForm
register = Library()
@register.simple_tag
def get_login():
return LoginForm
@register.simple_tag
def get_register():
return RegistrationForm
| true
| true
|
1c464ec8780d8f5ce3fb571d62ddf71de207f74c
| 2,383
|
py
|
Python
|
app/words.py
|
anbasile/mwe
|
2a56b889c7c7f28aa479e477f8e52da7501c2691
|
[
"Apache-2.0"
] | null | null | null |
app/words.py
|
anbasile/mwe
|
2a56b889c7c7f28aa479e477f8e52da7501c2691
|
[
"Apache-2.0"
] | 2
|
2016-08-31T16:21:31.000Z
|
2016-09-10T21:50:12.000Z
|
app/words.py
|
anbasile/mwe
|
2a56b889c7c7f28aa479e477f8e52da7501c2691
|
[
"Apache-2.0"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
from collections import defaultdict
import pandas as pd
import json
import networkx as nx
from networkx.readwrite import json_graph
import numpy as np
from lightning import Lightning
from colorsys import hsv_to_rgb
from sklearn import datasets
lgn = Lightning(host='http://public.lightning-viz.org')
def calculate(words):
    # instantiate a dictionary to be filled later with word: MI-score pairs
wc = defaultdict(float)
frames = []
print("...it will take a while. Wait a sec...")
for word in words:
payload = {'searchstring': word.encode('ascii'),
'searchpositional':'word',
'searchpostag':'all',
'contextsize':'60c',
'sort2':'right',
'terminate':'100',
'searchtype':'coll',
'mistat':'on',
'collocspanleft':'2',
'collocspanright':'2',
'collocfilter':'noun'}
r = requests.get("http://clic.cimec.unitn.it/cgi-bin/cqp/cqp.pl?corpuslist=WEBBIT", params=payload)
soup = BeautifulSoup(r.content, 'lxml')
        # parse the html table, extract words and MI scores, and accumulate the scores
temp = []
for tr in soup.find_all('tr')[1:]:
tds = tr.find_all('td')
word = tds[0].text.split('~~')[1]
mi = float(tds[4].text)
wc[word] += mi
temp.append(map(lambda x:x.text,tds[0:]))
x = pd.DataFrame(temp)
df = pd.DataFrame()
df['coll'] = x.ix[0:,0].apply(lambda x: x.split('~~')[1])
df['word'] = x.ix[0:,0].apply(lambda x: x.split('~~')[0])
df['mi'] = x.ix[0:,4]
frames.append(df)
#sort the results in decreasing order
results = []
for w in sorted(wc, key=wc.get, reverse=True):
results.append((w, wc[w]))
    #spit out the top result. If using ipython you can check the rest of the list by typing `results`
#viz part
results_df = pd.concat(frames)
G=nx.from_pandas_dataframe(results_df, 'word','coll',['mi'])
mat = nx.adjacency_matrix(G).todense()
viz = lgn.force(mat)
vid = viz.id
print(vid)
url = '<iframe src="http://public.lightning-viz.org/visualizations/'+vid+'/iframe/" width=100% height=400px>'
return (results[0][0].strip(),url)
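# Minimal usage sketch; the query words are arbitrary examples and the call
# needs network access to the CQP service queried above.
if __name__ == '__main__':
    top_collocate, embed_url = calculate(['casa', 'tempo'])
    print(top_collocate)
    print(embed_url)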
| 35.567164
| 113
| 0.578682
|
import requests
from bs4 import BeautifulSoup
from collections import defaultdict
import pandas as pd
import json
import networkx as nx
from networkx.readwrite import json_graph
import numpy as np
from lightning import Lightning
from colorsys import hsv_to_rgb
from sklearn import datasets
lgn = Lightning(host='http://public.lightning-viz.org')
def calculate(words):
wc = defaultdict(float)
frames = []
print("...it will take a while. Wait a sec...")
for word in words:
payload = {'searchstring': word.encode('ascii'),
'searchpositional':'word',
'searchpostag':'all',
'contextsize':'60c',
'sort2':'right',
'terminate':'100',
'searchtype':'coll',
'mistat':'on',
'collocspanleft':'2',
'collocspanright':'2',
'collocfilter':'noun'}
r = requests.get("http://clic.cimec.unitn.it/cgi-bin/cqp/cqp.pl?corpuslist=WEBBIT", params=payload)
soup = BeautifulSoup(r.content, 'lxml')
temp = []
for tr in soup.find_all('tr')[1:]:
tds = tr.find_all('td')
word = tds[0].text.split('~~')[1]
mi = float(tds[4].text)
wc[word] += mi
temp.append(map(lambda x:x.text,tds[0:]))
x = pd.DataFrame(temp)
df = pd.DataFrame()
df['coll'] = x.ix[0:,0].apply(lambda x: x.split('~~')[1])
df['word'] = x.ix[0:,0].apply(lambda x: x.split('~~')[0])
df['mi'] = x.ix[0:,4]
frames.append(df)
results = []
for w in sorted(wc, key=wc.get, reverse=True):
results.append((w, wc[w]))
results_df = pd.concat(frames)
G=nx.from_pandas_dataframe(results_df, 'word','coll',['mi'])
mat = nx.adjacency_matrix(G).todense()
viz = lgn.force(mat)
vid = viz.id
print(vid)
url = '<iframe src="http://public.lightning-viz.org/visualizations/'+vid+'/iframe/" width=100% height=400px>'
return (results[0][0].strip(),url)
| true
| true
|
1c464fea142c0d5443ee2c8f9823dac623cc81f2
| 10,404
|
py
|
Python
|
gui/kivy/uix/dialogs/settings.py
|
lionzeye/reddelectrum
|
e39497aee08b08bed89efa10072d17fb1e37920c
|
[
"MIT"
] | null | null | null |
gui/kivy/uix/dialogs/settings.py
|
lionzeye/reddelectrum
|
e39497aee08b08bed89efa10072d17fb1e37920c
|
[
"MIT"
] | null | null | null |
gui/kivy/uix/dialogs/settings.py
|
lionzeye/reddelectrum
|
e39497aee08b08bed89efa10072d17fb1e37920c
|
[
"MIT"
] | null | null | null |
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from reddelectrum.util import base_units
from reddelectrum.i18n import languages
from reddelectrum_gui.kivy.i18n import _
from reddelectrum.plugins import run_hook
from reddelectrum import coinchooser
from reddelectrum.util import fee_levels
from choice_dialog import ChoiceDialog
Builder.load_string('''
#:import partial functools.partial
#:import _ reddelectrum_gui.kivy.i18n._
<SettingsDialog@Popup>
id: settings
title: _('Electrum Settings')
disable_pin: False
use_encryption: False
BoxLayout:
orientation: 'vertical'
ScrollView:
GridLayout:
id: scrollviewlayout
cols:1
size_hint: 1, None
height: self.minimum_height
padding: '10dp'
SettingsItem:
lang: settings.get_language_name()
title: 'Language' + ': ' + str(self.lang)
description: _('Language')
action: partial(root.language_dialog, self)
CardSeparator
SettingsItem:
status: '' if root.disable_pin else ('ON' if root.use_encryption else 'OFF')
disabled: root.disable_pin
title: _('PIN code') + ': ' + self.status
description: _("Change your PIN code.")
action: partial(root.change_password, self)
CardSeparator
SettingsItem:
bu: app.base_unit
title: _('Denomination') + ': ' + self.bu
description: _("Base unit for Reddcoin amounts.")
action: partial(root.unit_dialog, self)
CardSeparator
SettingsItem:
status: root.fee_status()
title: _('Fees') + ': ' + self.status
description: _("Fees paid to the Reddcoin miners.")
action: partial(root.fee_dialog, self)
CardSeparator
SettingsItem:
status: root.fx_status()
title: _('Fiat Currency') + ': ' + self.status
description: _("Display amounts in fiat currency.")
action: partial(root.fx_dialog, self)
CardSeparator
SettingsItem:
status: 'ON' if bool(app.plugins.get('labels')) else 'OFF'
title: _('Labels Sync') + ': ' + self.status
description: _("Save and synchronize your labels.")
action: partial(root.plugin_dialog, 'labels', self)
CardSeparator
SettingsItem:
status: 'ON' if app.use_rbf else 'OFF'
title: _('Replace-by-fee') + ': ' + self.status
description: _("Create replaceable transactions.")
message:
_('If you check this box, your transactions will be marked as non-final,') \
                    + ' ' + _('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') \
+ ' ' + _('Note that some merchants do not accept non-final transactions until they are confirmed.')
action: partial(root.boolean_dialog, 'use_rbf', _('Replace by fee'), self.message)
CardSeparator
SettingsItem:
status: _('Yes') if app.use_unconfirmed else _('No')
title: _('Spend unconfirmed') + ': ' + self.status
description: _("Use unconfirmed coins in transactions.")
message: _('Spend unconfirmed coins')
action: partial(root.boolean_dialog, 'use_unconfirmed', _('Use unconfirmed'), self.message)
CardSeparator
SettingsItem:
status: _('Yes') if app.use_change else _('No')
title: _('Use change addresses') + ': ' + self.status
description: _("Send your change to separate addresses.")
message: _('Send excess coins to change addresses')
action: partial(root.boolean_dialog, 'use_change', _('Use change addresses'), self.message)
CardSeparator
SettingsItem:
status: root.coinselect_status()
title: _('Coin selection') + ': ' + self.status
description: "Coin selection method"
action: partial(root.coinselect_dialog, self)
''')
class SettingsDialog(Factory.Popup):
def __init__(self, app):
self.app = app
self.plugins = self.app.plugins
self.config = self.app.electrum_config
Factory.Popup.__init__(self)
layout = self.ids.scrollviewlayout
layout.bind(minimum_height=layout.setter('height'))
# cached dialogs
self._fx_dialog = None
self._fee_dialog = None
self._proxy_dialog = None
self._language_dialog = None
self._unit_dialog = None
self._coinselect_dialog = None
def update(self):
self.wallet = self.app.wallet
self.disable_pin = self.wallet.is_watching_only() if self.wallet else True
self.use_encryption = self.wallet.has_password() if self.wallet else False
def get_language_name(self):
return languages.get(self.config.get('language', 'en_UK'), '')
def change_password(self, item, dt):
self.app.change_password(self.update)
def language_dialog(self, item, dt):
if self._language_dialog is None:
l = self.config.get('language', 'en_UK')
def cb(key):
self.config.set_key("language", key, True)
item.lang = self.get_language_name()
self.app.language = key
self._language_dialog = ChoiceDialog(_('Language'), languages, l, cb)
self._language_dialog.open()
def unit_dialog(self, item, dt):
if self._unit_dialog is None:
def cb(text):
self.app._set_bu(text)
item.bu = self.app.base_unit
self._unit_dialog = ChoiceDialog(_('Denomination'), base_units.keys(), self.app.base_unit, cb)
self._unit_dialog.open()
def coinselect_status(self):
return coinchooser.get_name(self.app.electrum_config)
def coinselect_dialog(self, item, dt):
if self._coinselect_dialog is None:
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
chooser_name = coinchooser.get_name(self.config)
def cb(text):
self.config.set_key('coin_chooser', text)
item.status = text
self._coinselect_dialog = ChoiceDialog(_('Coin selection'), choosers, chooser_name, cb)
self._coinselect_dialog.open()
def proxy_status(self):
server, port, protocol, proxy, auto_connect = self.app.network.get_parameters()
return proxy.get('host') +':' + proxy.get('port') if proxy else _('None')
def proxy_dialog(self, item, dt):
if self._proxy_dialog is None:
server, port, protocol, proxy, auto_connect = self.app.network.get_parameters()
def callback(popup):
if popup.ids.mode.text != 'None':
proxy = {
'mode':popup.ids.mode.text,
'host':popup.ids.host.text,
'port':popup.ids.port.text,
'user':popup.ids.user.text,
'password':popup.ids.password.text
}
else:
proxy = None
self.app.network.set_parameters(server, port, protocol, proxy, auto_connect)
item.status = self.proxy_status()
popup = Builder.load_file('gui/kivy/uix/ui_screens/proxy.kv')
popup.ids.mode.text = proxy.get('mode') if proxy else 'None'
popup.ids.host.text = proxy.get('host') if proxy else ''
popup.ids.port.text = proxy.get('port') if proxy else ''
popup.ids.user.text = proxy.get('user') if proxy else ''
popup.ids.password.text = proxy.get('password') if proxy else ''
popup.on_dismiss = lambda: callback(popup)
self._proxy_dialog = popup
self._proxy_dialog.open()
def plugin_dialog(self, name, label, dt):
from checkbox_dialog import CheckBoxDialog
def callback(status):
self.plugins.enable(name) if status else self.plugins.disable(name)
label.status = 'ON' if status else 'OFF'
status = bool(self.plugins.get(name))
dd = self.plugins.descriptions.get(name)
descr = dd.get('description')
fullname = dd.get('fullname')
d = CheckBoxDialog(fullname, descr, status, callback)
d.open()
def fee_status(self):
if self.config.get('dynamic_fees', True):
return fee_levels[self.config.get('fee_level', 2)]
else:
return self.app.format_amount_and_units(self.config.fee_per_kb()) + '/kB'
def fee_dialog(self, label, dt):
if self._fee_dialog is None:
from fee_dialog import FeeDialog
def cb():
label.status = self.fee_status()
self._fee_dialog = FeeDialog(self.app, self.config, cb)
self._fee_dialog.open()
def boolean_dialog(self, name, title, message, dt):
from checkbox_dialog import CheckBoxDialog
CheckBoxDialog(title, message, getattr(self.app, name), lambda x: setattr(self.app, name, x)).open()
def fx_status(self):
fx = self.app.fx
if fx.is_enabled():
source = fx.exchange.name()
ccy = fx.get_currency()
return '%s [%s]' %(ccy, source)
else:
return _('None')
def fx_dialog(self, label, dt):
if self._fx_dialog is None:
from fx_dialog import FxDialog
def cb():
label.status = self.fx_status()
self._fx_dialog = FxDialog(self.app, self.plugins, self.config, cb)
self._fx_dialog.open()
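# Minimal usage sketch; assumes a running Kivy app object exposing wallet,
# plugins and electrum_config as the dialog above expects.
def open_settings(app):
    d = SettingsDialog(app)
    d.update()  # refresh PIN/encryption status from the current wallet
    d.open()
    return d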
| 43.714286
| 157
| 0.569685
|
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from reddelectrum.util import base_units
from reddelectrum.i18n import languages
from reddelectrum_gui.kivy.i18n import _
from reddelectrum.plugins import run_hook
from reddelectrum import coinchooser
from reddelectrum.util import fee_levels
from choice_dialog import ChoiceDialog
Builder.load_string('''
#:import partial functools.partial
#:import _ reddelectrum_gui.kivy.i18n._
<SettingsDialog@Popup>
id: settings
title: _('Electrum Settings')
disable_pin: False
use_encryption: False
BoxLayout:
orientation: 'vertical'
ScrollView:
GridLayout:
id: scrollviewlayout
cols:1
size_hint: 1, None
height: self.minimum_height
padding: '10dp'
SettingsItem:
lang: settings.get_language_name()
title: 'Language' + ': ' + str(self.lang)
description: _('Language')
action: partial(root.language_dialog, self)
CardSeparator
SettingsItem:
status: '' if root.disable_pin else ('ON' if root.use_encryption else 'OFF')
disabled: root.disable_pin
title: _('PIN code') + ': ' + self.status
description: _("Change your PIN code.")
action: partial(root.change_password, self)
CardSeparator
SettingsItem:
bu: app.base_unit
title: _('Denomination') + ': ' + self.bu
description: _("Base unit for Reddcoin amounts.")
action: partial(root.unit_dialog, self)
CardSeparator
SettingsItem:
status: root.fee_status()
title: _('Fees') + ': ' + self.status
description: _("Fees paid to the Reddcoin miners.")
action: partial(root.fee_dialog, self)
CardSeparator
SettingsItem:
status: root.fx_status()
title: _('Fiat Currency') + ': ' + self.status
description: _("Display amounts in fiat currency.")
action: partial(root.fx_dialog, self)
CardSeparator
SettingsItem:
status: 'ON' if bool(app.plugins.get('labels')) else 'OFF'
title: _('Labels Sync') + ': ' + self.status
description: _("Save and synchronize your labels.")
action: partial(root.plugin_dialog, 'labels', self)
CardSeparator
SettingsItem:
status: 'ON' if app.use_rbf else 'OFF'
title: _('Replace-by-fee') + ': ' + self.status
description: _("Create replaceable transactions.")
message:
_('If you check this box, your transactions will be marked as non-final,') \
                    + ' ' + _('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') \
+ ' ' + _('Note that some merchants do not accept non-final transactions until they are confirmed.')
action: partial(root.boolean_dialog, 'use_rbf', _('Replace by fee'), self.message)
CardSeparator
SettingsItem:
status: _('Yes') if app.use_unconfirmed else _('No')
title: _('Spend unconfirmed') + ': ' + self.status
description: _("Use unconfirmed coins in transactions.")
message: _('Spend unconfirmed coins')
action: partial(root.boolean_dialog, 'use_unconfirmed', _('Use unconfirmed'), self.message)
CardSeparator
SettingsItem:
status: _('Yes') if app.use_change else _('No')
title: _('Use change addresses') + ': ' + self.status
description: _("Send your change to separate addresses.")
message: _('Send excess coins to change addresses')
action: partial(root.boolean_dialog, 'use_change', _('Use change addresses'), self.message)
CardSeparator
SettingsItem:
status: root.coinselect_status()
title: _('Coin selection') + ': ' + self.status
description: "Coin selection method"
action: partial(root.coinselect_dialog, self)
''')
class SettingsDialog(Factory.Popup):
def __init__(self, app):
self.app = app
self.plugins = self.app.plugins
self.config = self.app.electrum_config
Factory.Popup.__init__(self)
layout = self.ids.scrollviewlayout
layout.bind(minimum_height=layout.setter('height'))
self._fx_dialog = None
self._fee_dialog = None
self._proxy_dialog = None
self._language_dialog = None
self._unit_dialog = None
self._coinselect_dialog = None
def update(self):
self.wallet = self.app.wallet
self.disable_pin = self.wallet.is_watching_only() if self.wallet else True
self.use_encryption = self.wallet.has_password() if self.wallet else False
def get_language_name(self):
return languages.get(self.config.get('language', 'en_UK'), '')
def change_password(self, item, dt):
self.app.change_password(self.update)
def language_dialog(self, item, dt):
if self._language_dialog is None:
l = self.config.get('language', 'en_UK')
def cb(key):
self.config.set_key("language", key, True)
item.lang = self.get_language_name()
self.app.language = key
self._language_dialog = ChoiceDialog(_('Language'), languages, l, cb)
self._language_dialog.open()
def unit_dialog(self, item, dt):
if self._unit_dialog is None:
def cb(text):
self.app._set_bu(text)
item.bu = self.app.base_unit
self._unit_dialog = ChoiceDialog(_('Denomination'), base_units.keys(), self.app.base_unit, cb)
self._unit_dialog.open()
def coinselect_status(self):
return coinchooser.get_name(self.app.electrum_config)
def coinselect_dialog(self, item, dt):
if self._coinselect_dialog is None:
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
chooser_name = coinchooser.get_name(self.config)
def cb(text):
self.config.set_key('coin_chooser', text)
item.status = text
self._coinselect_dialog = ChoiceDialog(_('Coin selection'), choosers, chooser_name, cb)
self._coinselect_dialog.open()
def proxy_status(self):
server, port, protocol, proxy, auto_connect = self.app.network.get_parameters()
return proxy.get('host') +':' + proxy.get('port') if proxy else _('None')
def proxy_dialog(self, item, dt):
if self._proxy_dialog is None:
server, port, protocol, proxy, auto_connect = self.app.network.get_parameters()
def callback(popup):
if popup.ids.mode.text != 'None':
proxy = {
'mode':popup.ids.mode.text,
'host':popup.ids.host.text,
'port':popup.ids.port.text,
'user':popup.ids.user.text,
'password':popup.ids.password.text
}
else:
proxy = None
self.app.network.set_parameters(server, port, protocol, proxy, auto_connect)
item.status = self.proxy_status()
popup = Builder.load_file('gui/kivy/uix/ui_screens/proxy.kv')
popup.ids.mode.text = proxy.get('mode') if proxy else 'None'
popup.ids.host.text = proxy.get('host') if proxy else ''
popup.ids.port.text = proxy.get('port') if proxy else ''
popup.ids.user.text = proxy.get('user') if proxy else ''
popup.ids.password.text = proxy.get('password') if proxy else ''
popup.on_dismiss = lambda: callback(popup)
self._proxy_dialog = popup
self._proxy_dialog.open()
def plugin_dialog(self, name, label, dt):
from checkbox_dialog import CheckBoxDialog
def callback(status):
self.plugins.enable(name) if status else self.plugins.disable(name)
label.status = 'ON' if status else 'OFF'
status = bool(self.plugins.get(name))
dd = self.plugins.descriptions.get(name)
descr = dd.get('description')
fullname = dd.get('fullname')
d = CheckBoxDialog(fullname, descr, status, callback)
d.open()
def fee_status(self):
if self.config.get('dynamic_fees', True):
return fee_levels[self.config.get('fee_level', 2)]
else:
return self.app.format_amount_and_units(self.config.fee_per_kb()) + '/kB'
def fee_dialog(self, label, dt):
if self._fee_dialog is None:
from fee_dialog import FeeDialog
def cb():
label.status = self.fee_status()
self._fee_dialog = FeeDialog(self.app, self.config, cb)
self._fee_dialog.open()
def boolean_dialog(self, name, title, message, dt):
from checkbox_dialog import CheckBoxDialog
CheckBoxDialog(title, message, getattr(self.app, name), lambda x: setattr(self.app, name, x)).open()
def fx_status(self):
fx = self.app.fx
if fx.is_enabled():
source = fx.exchange.name()
ccy = fx.get_currency()
return '%s [%s]' %(ccy, source)
else:
return _('None')
def fx_dialog(self, label, dt):
if self._fx_dialog is None:
from fx_dialog import FxDialog
def cb():
label.status = self.fx_status()
self._fx_dialog = FxDialog(self.app, self.plugins, self.config, cb)
self._fx_dialog.open()
| true
| true
|
1c46504895e0e2d1fa84256a4ac14e48db7125f9
| 19,813
|
py
|
Python
|
Lib/site-packages/pygments/lexers/html.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | 1
|
2021-12-14T21:23:25.000Z
|
2021-12-14T21:23:25.000Z
|
Lib/site-packages/pygments/lexers/html.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | 1,242
|
2019-08-31T16:03:19.000Z
|
2019-08-31T18:00:46.000Z
|
Lib/site-packages/pygments/lexers/html.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | 1
|
2019-10-04T01:56:03.000Z
|
2019-10-04T01:56:03.000Z
|
"""
pygments.lexers.html
~~~~~~~~~~~~~~~~~~~~
Lexers for HTML, XML and related markup.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
default, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Punctuation
from pygments.util import looks_like_xml, html_doctype_matches
from pygments.lexers.javascript import JavascriptLexer
from pygments.lexers.jvm import ScalaLexer
from pygments.lexers.css import CssLexer, _indentation, _starts_block
from pygments.lexers.ruby import RubyLexer
__all__ = ['HtmlLexer', 'DtdLexer', 'XmlLexer', 'XsltLexer', 'HamlLexer',
'ScamlLexer', 'PugLexer']
class HtmlLexer(RegexLexer):
"""
    For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS are highlighted
by the appropriate lexer.
"""
name = 'HTML'
url = 'https://html.spec.whatwg.org/'
aliases = ['html']
filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
mimetypes = ['text/html', 'application/xhtml+xml']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
(r'<!--.*?-->', Comment.Multiline),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'(<)(\s*)(script)(\s*)',
bygroups(Punctuation, Text, Name.Tag, Text),
('script-content', 'tag')),
(r'(<)(\s*)(style)(\s*)',
bygroups(Punctuation, Text, Name.Tag, Text),
('style-content', 'tag')),
# note: this allows tag names not used in HTML like <x:with-dash>,
# this is to support yet-unknown template engines and the like
(r'(<)(\s*)([\w:.-]+)',
bygroups(Punctuation, Text, Name.Tag), 'tag'),
(r'(<)(\s*)(/)(\s*)([\w:.-]+)(\s*)(>)',
bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
Punctuation)),
],
'tag': [
(r'\s+', Text),
(r'([\w:-]+\s*)(=)(\s*)', bygroups(Name.Attribute, Operator, Text),
'attr'),
(r'[\w:-]+', Name.Attribute),
(r'(/?)(\s*)(>)', bygroups(Punctuation, Text, Punctuation), '#pop'),
],
'script-content': [
(r'(<)(\s*)(/)(\s*)(script)(\s*)(>)',
bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
Punctuation), '#pop'),
(r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
# fallback cases for when there is no closing script tag
# first look for newline and then go back into root state
# if that fails just read the rest of the file
# this is similar to the error handling logic in lexer.py
(r'.+?\n', using(JavascriptLexer), '#pop'),
(r'.+', using(JavascriptLexer), '#pop'),
],
'style-content': [
(r'(<)(\s*)(/)(\s*)(style)(\s*)(>)',
bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
Punctuation),'#pop'),
(r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
# fallback cases for when there is no closing style tag
# first look for newline and then go back into root state
# if that fails just read the rest of the file
# this is similar to the error handling logic in lexer.py
(r'.+?\n', using(CssLexer), '#pop'),
(r'.+', using(CssLexer), '#pop'),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if html_doctype_matches(text):
return 0.5
class DtdLexer(RegexLexer):
"""
A lexer for DTDs (Document Type Definitions).
.. versionadded:: 1.5
"""
flags = re.MULTILINE | re.DOTALL
name = 'DTD'
aliases = ['dtd']
filenames = ['*.dtd']
mimetypes = ['application/xml-dtd']
tokens = {
'root': [
include('common'),
(r'(<!ELEMENT)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'element'),
(r'(<!ATTLIST)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'attlist'),
(r'(<!ENTITY)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Entity), 'entity'),
(r'(<!NOTATION)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'notation'),
(r'(<!\[)([^\[\s]+)(\s*)(\[)', # conditional sections
bygroups(Keyword, Name.Entity, Text, Keyword)),
(r'(<!DOCTYPE)(\s+)([^>\s]+)',
bygroups(Keyword, Text, Name.Tag)),
(r'PUBLIC|SYSTEM', Keyword.Constant),
(r'[\[\]>]', Keyword),
],
'common': [
(r'\s+', Text),
(r'(%|&)[^;]*;', Name.Entity),
('<!--', Comment, 'comment'),
(r'[(|)*,?+]', Operator),
(r'"[^"]*"', String.Double),
(r'\'[^\']*\'', String.Single),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'element': [
include('common'),
(r'EMPTY|ANY|#PCDATA', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Tag),
(r'>', Keyword, '#pop'),
],
'attlist': [
include('common'),
(r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION',
Keyword.Constant),
(r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant),
(r'xml:space|xml:lang', Keyword.Reserved),
(r'[^>\s|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
'entity': [
include('common'),
(r'SYSTEM|PUBLIC|NDATA', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Entity),
(r'>', Keyword, '#pop'),
],
'notation': [
include('common'),
(r'SYSTEM|PUBLIC', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
}
def analyse_text(text):
if not looks_like_xml(text) and \
('<!ELEMENT' in text or '<!ATTLIST' in text or '<!ENTITY' in text):
return 0.8
class XmlLexer(RegexLexer):
"""
Generic lexer for XML (eXtensible Markup Language).
"""
flags = re.MULTILINE | re.DOTALL
name = 'XML'
aliases = ['xml']
filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd',
'*.wsdl', '*.wsf']
mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
'application/rss+xml', 'application/atom+xml']
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
(r'<!--.*?-->', Comment.Multiline),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'tag': [
(r'\s+', Text),
(r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
(r'\s+', Text),
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if looks_like_xml(text):
return 0.45 # less than HTML
class XsltLexer(XmlLexer):
"""
A lexer for XSLT.
.. versionadded:: 0.10
"""
name = 'XSLT'
aliases = ['xslt']
filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc
mimetypes = ['application/xsl+xml', 'application/xslt+xml']
EXTRA_KEYWORDS = {
'apply-imports', 'apply-templates', 'attribute',
'attribute-set', 'call-template', 'choose', 'comment',
'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
'for-each', 'if', 'import', 'include', 'key', 'message',
'namespace-alias', 'number', 'otherwise', 'output', 'param',
'preserve-space', 'processing-instruction', 'sort',
'strip-space', 'stylesheet', 'template', 'text', 'transform',
'value-of', 'variable', 'when', 'with-param'
}
def get_tokens_unprocessed(self, text):
for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
m = re.match('</?xsl:([^>]*)/?>?', value)
if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS:
yield index, Keyword, value
else:
yield index, token, value
def analyse_text(text):
if looks_like_xml(text) and '<xsl' in text:
return 0.8
class HamlLexer(ExtendedRegexLexer):
"""
For Haml markup.
.. versionadded:: 1.3
"""
name = 'Haml'
aliases = ['haml']
filenames = ['*.haml']
mimetypes = ['text/x-haml']
flags = re.IGNORECASE
# Haml can include " |\n" anywhere,
# which is ignored and used to wrap long lines.
# To accommodate this, use this custom faux dot instead.
_dot = r'(?: \|\n(?=.* \|)|.)'
# In certain places, a comma at the end of the line
# allows line wrapping as well.
_comma_dot = r'(?:,\s*\n|' + _dot + ')'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'root'),
default('plain'),
],
'content': [
include('css'),
(r'%[\w:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'haml-comment-block'), '#pop'),
(r'(-)(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + r')*?\}', using(RubyLexer)),
(r'\[' + _dot + r'*?\]', using(RubyLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
(r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'haml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class ScamlLexer(ExtendedRegexLexer):
"""
For `Scaml markup <http://scalate.fusesource.org/>`_. Scaml is Haml for Scala.
.. versionadded:: 1.4
"""
name = 'Scaml'
aliases = ['scaml']
filenames = ['*.scaml']
mimetypes = ['text/x-scaml']
flags = re.IGNORECASE
# Scaml does not yet support the " |\n" notation to
# wrap long lines. Once it does, use the custom faux
# dot instead.
# _dot = r'(?: \|\n(?=.* \|)|.)'
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'root'),
default('plain'),
],
'content': [
include('css'),
(r'%[\w:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + r')*?\}', using(ScalaLexer)),
(r'\[' + _dot + r'*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
(r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class PugLexer(ExtendedRegexLexer):
"""
For Pug markup.
Pug is a variant of Scaml, see:
http://scalate.fusesource.org/documentation/scaml-reference.html
.. versionadded:: 1.4
"""
name = 'Pug'
aliases = ['pug', 'jade']
filenames = ['*.pug', '*.jade']
mimetypes = ['text/x-pug', 'text/x-jade']
flags = re.IGNORECASE
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)), 'root'),
default('plain'),
],
'content': [
include('css'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
(r'[\w:-]+', Name.Tag, 'tag'),
(r'\|', Text, 'eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + r')*?\}', using(ScalaLexer)),
(r'\[' + _dot + r'*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
(r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
JadeLexer = PugLexer # compat
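# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module): the lexers defined
# above plug into the standard Pygments highlight() pipeline. The Haml snippet
# below is made up purely for illustration; only Pygments itself is assumed to
# be installed.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import HtmlFormatter

    sample = "%p\n  = @greeting\n"
    print(highlight(sample, HamlLexer(), HtmlFormatter()))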
| 32.74876
| 83
| 0.4225
|
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
default, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Punctuation
from pygments.util import looks_like_xml, html_doctype_matches
from pygments.lexers.javascript import JavascriptLexer
from pygments.lexers.jvm import ScalaLexer
from pygments.lexers.css import CssLexer, _indentation, _starts_block
from pygments.lexers.ruby import RubyLexer
__all__ = ['HtmlLexer', 'DtdLexer', 'XmlLexer', 'XsltLexer', 'HamlLexer',
'ScamlLexer', 'PugLexer']
class HtmlLexer(RegexLexer):
name = 'HTML'
url = 'https://html.spec.whatwg.org/'
aliases = ['html']
filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
mimetypes = ['text/html', 'application/xhtml+xml']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
(r'<!--.*?-->', Comment.Multiline),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'(<)(\s*)(script)(\s*)',
bygroups(Punctuation, Text, Name.Tag, Text),
('script-content', 'tag')),
(r'(<)(\s*)(style)(\s*)',
bygroups(Punctuation, Text, Name.Tag, Text),
('style-content', 'tag')),
(r'(<)(\s*)([\w:.-]+)',
bygroups(Punctuation, Text, Name.Tag), 'tag'),
(r'(<)(\s*)(/)(\s*)([\w:.-]+)(\s*)(>)',
bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
Punctuation)),
],
'tag': [
(r'\s+', Text),
(r'([\w:-]+\s*)(=)(\s*)', bygroups(Name.Attribute, Operator, Text),
'attr'),
(r'[\w:-]+', Name.Attribute),
(r'(/?)(\s*)(>)', bygroups(Punctuation, Text, Punctuation), '#pop'),
],
'script-content': [
(r'(<)(\s*)(/)(\s*)(script)(\s*)(>)',
bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
Punctuation), '#pop'),
(r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
(r'.+?\n', using(JavascriptLexer), '#pop'),
(r'.+', using(JavascriptLexer), '#pop'),
],
'style-content': [
(r'(<)(\s*)(/)(\s*)(style)(\s*)(>)',
bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
Punctuation),'#pop'),
(r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
(r'.+?\n', using(CssLexer), '#pop'),
(r'.+', using(CssLexer), '#pop'),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if html_doctype_matches(text):
return 0.5
class DtdLexer(RegexLexer):
flags = re.MULTILINE | re.DOTALL
name = 'DTD'
aliases = ['dtd']
filenames = ['*.dtd']
mimetypes = ['application/xml-dtd']
tokens = {
'root': [
include('common'),
(r'(<!ELEMENT)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'element'),
(r'(<!ATTLIST)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'attlist'),
(r'(<!ENTITY)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Entity), 'entity'),
(r'(<!NOTATION)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'notation'),
(r'(<!\[)([^\[\s]+)(\s*)(\[)',
bygroups(Keyword, Name.Entity, Text, Keyword)),
(r'(<!DOCTYPE)(\s+)([^>\s]+)',
bygroups(Keyword, Text, Name.Tag)),
(r'PUBLIC|SYSTEM', Keyword.Constant),
(r'[\[\]>]', Keyword),
],
'common': [
(r'\s+', Text),
(r'(%|&)[^;]*;', Name.Entity),
('<!--', Comment, 'comment'),
(r'[(|)*,?+]', Operator),
(r'"[^"]*"', String.Double),
(r'\'[^\']*\'', String.Single),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'element': [
include('common'),
(r'EMPTY|ANY|#PCDATA', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Tag),
(r'>', Keyword, '#pop'),
],
'attlist': [
include('common'),
(r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION',
Keyword.Constant),
(r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant),
(r'xml:space|xml:lang', Keyword.Reserved),
(r'[^>\s|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
'entity': [
include('common'),
(r'SYSTEM|PUBLIC|NDATA', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Entity),
(r'>', Keyword, '#pop'),
],
'notation': [
include('common'),
(r'SYSTEM|PUBLIC', Keyword.Constant),
(r'[^>\s|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
}
def analyse_text(text):
if not looks_like_xml(text) and \
('<!ELEMENT' in text or '<!ATTLIST' in text or '<!ENTITY' in text):
return 0.8
class XmlLexer(RegexLexer):
flags = re.MULTILINE | re.DOTALL
name = 'XML'
aliases = ['xml']
filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd',
'*.wsdl', '*.wsf']
mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
'application/rss+xml', 'application/atom+xml']
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
(r'<!--.*?-->', Comment.Multiline),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'tag': [
(r'\s+', Text),
(r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
(r'\s+', Text),
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if looks_like_xml(text):
return 0.45 # less than HTML
class XsltLexer(XmlLexer):
name = 'XSLT'
aliases = ['xslt']
filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc
mimetypes = ['application/xsl+xml', 'application/xslt+xml']
EXTRA_KEYWORDS = {
'apply-imports', 'apply-templates', 'attribute',
'attribute-set', 'call-template', 'choose', 'comment',
'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
'for-each', 'if', 'import', 'include', 'key', 'message',
'namespace-alias', 'number', 'otherwise', 'output', 'param',
'preserve-space', 'processing-instruction', 'sort',
'strip-space', 'stylesheet', 'template', 'text', 'transform',
'value-of', 'variable', 'when', 'with-param'
}
def get_tokens_unprocessed(self, text):
for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
m = re.match('</?xsl:([^>]*)/?>?', value)
if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS:
yield index, Keyword, value
else:
yield index, token, value
def analyse_text(text):
if looks_like_xml(text) and '<xsl' in text:
return 0.8
class HamlLexer(ExtendedRegexLexer):
name = 'Haml'
aliases = ['haml']
filenames = ['*.haml']
mimetypes = ['text/x-haml']
flags = re.IGNORECASE
# Haml can include " |\n" anywhere,
# which is ignored and used to wrap long lines.
# To accommodate this, use this custom faux dot instead.
_dot = r'(?: \|\n(?=.* \|)|.)'
# In certain places, a comma at the end of the line
# allows line wrapping as well.
_comma_dot = r'(?:,\s*\n|' + _dot + ')'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'root'),
default('plain'),
],
'content': [
include('css'),
(r'%[\w:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'haml-comment-block'), '#pop'),
(r'(-)(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + r')*?\}', using(RubyLexer)),
(r'\[' + _dot + r'*?\]', using(RubyLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
(r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'haml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class ScamlLexer(ExtendedRegexLexer):
name = 'Scaml'
aliases = ['scaml']
filenames = ['*.scaml']
mimetypes = ['text/x-scaml']
flags = re.IGNORECASE
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'root'),
default('plain'),
],
'content': [
include('css'),
(r'%[\w:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + r')*?\}', using(ScalaLexer)),
(r'\[' + _dot + r'*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '
(r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class PugLexer(ExtendedRegexLexer):
name = 'Pug'
aliases = ['pug', 'jade']
filenames = ['*.pug', '*.jade']
mimetypes = ['text/x-pug', 'text/x-jade']
flags = re.IGNORECASE
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)), 'root'),
default('plain'),
],
'content': [
include('css'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
(r'[\w:-]+', Name.Tag, 'tag'),
(r'\|', Text, 'eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + r')*?\}', using(ScalaLexer)),
(r'\[' + _dot + r'*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
(r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + r'*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
JadeLexer = PugLexer
| true
| true
|
1c46543448022df5270116046c61a0e794fe676d
| 5,784
|
py
|
Python
|
shakenfist/daemons/resources.py
|
fidoandfido/shakenfist
|
18612b27649310fb2d6ea1b32dce89640e8c857d
|
[
"Apache-2.0"
] | null | null | null |
shakenfist/daemons/resources.py
|
fidoandfido/shakenfist
|
18612b27649310fb2d6ea1b32dce89640e8c857d
|
[
"Apache-2.0"
] | null | null | null |
shakenfist/daemons/resources.py
|
fidoandfido/shakenfist
|
18612b27649310fb2d6ea1b32dce89640e8c857d
|
[
"Apache-2.0"
] | null | null | null |
import os
import psutil
import time
from prometheus_client import Gauge
from prometheus_client import start_http_server
from shakenfist.daemons import daemon
from shakenfist import config
from shakenfist import db
from shakenfist import logutil
from shakenfist import util
LOG, _ = logutil.setup(__name__)
def _get_stats():
libvirt = util.get_libvirt()
retval = {}
conn = libvirt.open(None)
# CPU info
present_cpus, _, available_cpus = conn.getCPUMap()
retval.update({
'cpu_max': present_cpus,
'cpu_available': available_cpus,
})
retval['cpu_max_per_instance'] = conn.getMaxVcpus(None)
    # This is disabled because it collects data we don't currently use
# for i in range(present_cpus):
# per_cpu_stats = conn.getCPUStats(i)
# for key in per_cpu_stats:
# retval['cpu_core%d_%s' % (i, key)] = per_cpu_stats[key]
try:
load_1, load_5, load_15 = psutil.getloadavg()
retval.update({
'cpu_load_1': load_1,
'cpu_load_5': load_5,
'cpu_load_15': load_15,
})
except Exception as e:
util.ignore_exception('load average', e)
# System memory info, converting bytes to mb
stats = psutil.virtual_memory()
retval.update({
'memory_max': stats.total // 1024 // 1024,
'memory_available': stats.available // 1024 // 1024
})
# libvirt memory info, converting kb to mb
memory_status = conn.getMemoryStats(
libvirt.VIR_NODE_MEMORY_STATS_ALL_CELLS)
retval.update({
'memory_max_libvirt': memory_status['total'] // 1024,
'memory_available_libvirt': memory_status['free'] // 1024,
})
# Kernel Shared Memory (KSM) information
ksm_details = {}
for ent in os.listdir('/sys/kernel/mm/ksm'):
with open('/sys/kernel/mm/ksm/%s' % ent) as f:
ksm_details['memory_ksm_%s' % ent] = int(f.read().rstrip())
retval.update(ksm_details)
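    # Editorial note: on a typical Linux host /sys/kernel/mm/ksm exposes entries
    # such as 'run', 'pages_shared' and 'pages_sharing', so the loop above would
    # emit metrics like memory_ksm_pages_shared; the exact file set depends on
    # the kernel version.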
# Disk info
s = os.statvfs(config.parsed.get('STORAGE_PATH'))
disk_counters = psutil.disk_io_counters()
retval.update({
'disk_total': s.f_frsize * s.f_blocks,
'disk_free': s.f_frsize * s.f_bavail,
'disk_used': s.f_frsize * (s.f_blocks - s.f_bfree),
'disk_read_bytes': disk_counters.read_bytes,
'disk_write_bytes': disk_counters.write_bytes,
})
# Network info
net_counters = psutil.net_io_counters()
retval.update({
'network_read_bytes': net_counters.bytes_recv,
'network_write_bytes': net_counters.bytes_sent,
})
# Virtual machine consumption info
total_instances = 0
total_active_instances = 0
total_instance_max_memory = 0
total_instance_actual_memory = 0
total_instance_vcpus = 0
total_instance_cpu_time = 0
for guest in conn.listAllDomains():
try:
active = guest.isActive() == 1
except Exception:
active = False
_, maxmem, mem, cpus, cpu_time = guest.info()
if active:
total_instances += 1
total_active_instances += 1
total_instance_max_memory += maxmem
total_instance_actual_memory += mem
total_instance_vcpus += cpus
total_instance_cpu_time += cpu_time
# Queue health statistics
node_queue_processing, node_queue_waiting = db.get_queue_length(
config.parsed.get('NODE_NAME'))
retval.update({
'cpu_total_instance_vcpus': total_instance_vcpus,
'cpu_total_instance_cpu_time': total_instance_cpu_time,
'memory_total_instance_max': total_instance_max_memory // 1024,
'memory_total_instance_actual': total_instance_actual_memory // 1024,
'instances_total': total_instances,
'instances_active': total_active_instances,
'node_queue_processing': node_queue_processing,
'node_queue_waiting': node_queue_waiting,
})
if util.is_network_node():
network_queue_processing, network_queue_waiting = db.get_queue_length(
'networknode')
retval.update({
'network_queue_processing': network_queue_processing,
'network_queue_waiting': network_queue_waiting,
})
return retval
class Monitor(daemon.Daemon):
def __init__(self, id):
super(Monitor, self).__init__(id)
start_http_server(config.parsed.get('PROMETHEUS_METRICS_PORT'))
def run(self):
LOG.info('Starting')
gauges = {
'updated_at': Gauge('updated_at', 'The last time metrics were updated')
}
last_metrics = 0
def update_metrics():
global last_metrics
stats = _get_stats()
for metric in stats:
if metric not in gauges:
gauges[metric] = Gauge(metric, '')
gauges[metric].set(stats[metric])
db.update_metrics_bulk(stats)
LOG.debug('Updated metrics')
gauges['updated_at'].set_to_current_time()
while True:
try:
jobname, _ = db.dequeue(
'%s-metrics' % config.parsed.get('NODE_NAME'))
if jobname:
if time.time() - last_metrics > 2:
update_metrics()
last_metrics = time.time()
db.resolve('%s-metrics' % config.parsed.get('NODE_NAME'),
jobname)
else:
time.sleep(0.2)
if time.time() - last_metrics > config.parsed.get('SCHEDULER_CACHE_TIMEOUT'):
update_metrics()
last_metrics = time.time()
except Exception as e:
util.ignore_exception('resource statistics', e)
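# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original daemon): the Monitor above
# lazily creates one prometheus_client Gauge per metric name returned by
# _get_stats(). The same pattern in isolation, with made-up values and an
# assumed free port 8000:
if __name__ == '__main__':
    from prometheus_client import Gauge, start_http_server

    start_http_server(8000)
    gauges = {}
    stats = {'cpu_load_1': 0.42, 'memory_available': 2048}  # stand-in for _get_stats()
    for name, value in stats.items():
        if name not in gauges:
            gauges[name] = Gauge(name, '')
        gauges[name].set(value)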
| 31.434783
| 93
| 0.616355
|
import os
import psutil
import time
from prometheus_client import Gauge
from prometheus_client import start_http_server
from shakenfist.daemons import daemon
from shakenfist import config
from shakenfist import db
from shakenfist import logutil
from shakenfist import util
LOG, _ = logutil.setup(__name__)
def _get_stats():
libvirt = util.get_libvirt()
retval = {}
conn = libvirt.open(None)
present_cpus, _, available_cpus = conn.getCPUMap()
retval.update({
'cpu_max': present_cpus,
'cpu_available': available_cpus,
})
retval['cpu_max_per_instance'] = conn.getMaxVcpus(None)
# for i in range(present_cpus):
# per_cpu_stats = conn.getCPUStats(i)
# for key in per_cpu_stats:
# retval['cpu_core%d_%s' % (i, key)] = per_cpu_stats[key]
try:
load_1, load_5, load_15 = psutil.getloadavg()
retval.update({
'cpu_load_1': load_1,
'cpu_load_5': load_5,
'cpu_load_15': load_15,
})
except Exception as e:
util.ignore_exception('load average', e)
# System memory info, converting bytes to mb
stats = psutil.virtual_memory()
retval.update({
'memory_max': stats.total // 1024 // 1024,
'memory_available': stats.available // 1024 // 1024
})
# libvirt memory info, converting kb to mb
memory_status = conn.getMemoryStats(
libvirt.VIR_NODE_MEMORY_STATS_ALL_CELLS)
retval.update({
'memory_max_libvirt': memory_status['total'] // 1024,
'memory_available_libvirt': memory_status['free'] // 1024,
})
# Kernel Shared Memory (KSM) information
ksm_details = {}
for ent in os.listdir('/sys/kernel/mm/ksm'):
with open('/sys/kernel/mm/ksm/%s' % ent) as f:
ksm_details['memory_ksm_%s' % ent] = int(f.read().rstrip())
retval.update(ksm_details)
# Disk info
s = os.statvfs(config.parsed.get('STORAGE_PATH'))
disk_counters = psutil.disk_io_counters()
retval.update({
'disk_total': s.f_frsize * s.f_blocks,
'disk_free': s.f_frsize * s.f_bavail,
'disk_used': s.f_frsize * (s.f_blocks - s.f_bfree),
'disk_read_bytes': disk_counters.read_bytes,
'disk_write_bytes': disk_counters.write_bytes,
})
# Network info
net_counters = psutil.net_io_counters()
retval.update({
'network_read_bytes': net_counters.bytes_recv,
'network_write_bytes': net_counters.bytes_sent,
})
# Virtual machine consumption info
total_instances = 0
total_active_instances = 0
total_instance_max_memory = 0
total_instance_actual_memory = 0
total_instance_vcpus = 0
total_instance_cpu_time = 0
for guest in conn.listAllDomains():
try:
active = guest.isActive() == 1
except Exception:
active = False
_, maxmem, mem, cpus, cpu_time = guest.info()
if active:
total_instances += 1
total_active_instances += 1
total_instance_max_memory += maxmem
total_instance_actual_memory += mem
total_instance_vcpus += cpus
total_instance_cpu_time += cpu_time
# Queue health statistics
node_queue_processing, node_queue_waiting = db.get_queue_length(
config.parsed.get('NODE_NAME'))
retval.update({
'cpu_total_instance_vcpus': total_instance_vcpus,
'cpu_total_instance_cpu_time': total_instance_cpu_time,
'memory_total_instance_max': total_instance_max_memory // 1024,
'memory_total_instance_actual': total_instance_actual_memory // 1024,
'instances_total': total_instances,
'instances_active': total_active_instances,
'node_queue_processing': node_queue_processing,
'node_queue_waiting': node_queue_waiting,
})
if util.is_network_node():
network_queue_processing, network_queue_waiting = db.get_queue_length(
'networknode')
retval.update({
'network_queue_processing': network_queue_processing,
'network_queue_waiting': network_queue_waiting,
})
return retval
class Monitor(daemon.Daemon):
def __init__(self, id):
super(Monitor, self).__init__(id)
start_http_server(config.parsed.get('PROMETHEUS_METRICS_PORT'))
def run(self):
LOG.info('Starting')
gauges = {
'updated_at': Gauge('updated_at', 'The last time metrics were updated')
}
last_metrics = 0
def update_metrics():
global last_metrics
stats = _get_stats()
for metric in stats:
if metric not in gauges:
gauges[metric] = Gauge(metric, '')
gauges[metric].set(stats[metric])
db.update_metrics_bulk(stats)
LOG.debug('Updated metrics')
gauges['updated_at'].set_to_current_time()
while True:
try:
jobname, _ = db.dequeue(
'%s-metrics' % config.parsed.get('NODE_NAME'))
if jobname:
if time.time() - last_metrics > 2:
update_metrics()
last_metrics = time.time()
db.resolve('%s-metrics' % config.parsed.get('NODE_NAME'),
jobname)
else:
time.sleep(0.2)
if time.time() - last_metrics > config.parsed.get('SCHEDULER_CACHE_TIMEOUT'):
update_metrics()
last_metrics = time.time()
except Exception as e:
util.ignore_exception('resource statistics', e)
| true
| true
|
1c465512236dd5e487d4620bb11fe1ccf6b857ef
| 631
|
py
|
Python
|
pysoup/logger/__init__.py
|
illBeRoy/pysoup
|
742fd6630e1be27c275cb8dc6ee94412472cb20b
|
[
"MIT"
] | 4
|
2016-02-21T12:40:44.000Z
|
2019-06-13T13:23:19.000Z
|
pysoup/logger/__init__.py
|
illBeRoy/pysoup
|
742fd6630e1be27c275cb8dc6ee94412472cb20b
|
[
"MIT"
] | null | null | null |
pysoup/logger/__init__.py
|
illBeRoy/pysoup
|
742fd6630e1be27c275cb8dc6ee94412472cb20b
|
[
"MIT"
] | 1
|
2020-07-16T12:22:12.000Z
|
2020-07-16T12:22:12.000Z
|
import os.path
import pysoup.utils.assets
class Logger(object):
def __init__(self, cwd):
self._log = ''
self._cwd = cwd
def log(self, text):
self._log += '{0}\n'.format(text)
def log_dependency_results(self, failed_dependencies):
for dependency in failed_dependencies:
self.log('could not install {0}'.format(dependency))
def dump_to_file(self, filename='soup.log'):
if self._log != '':
with open(os.path.join(self._cwd, filename), 'wb') as f:
f.write(pysoup.utils.assets.LOGO)
f.write('\n{0}'.format(self._log))
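# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module): typical use of the
# Logger above; the working directory and the messages are placeholders.
if __name__ == '__main__':
    logger = Logger('/tmp')
    logger.log('installed package foo')
    logger.log_dependency_results(['bar'])
    # dump_to_file() would then write pysoup's logo plus the accumulated log to
    # /tmp/soup.log; note that it opens the file in binary mode ('wb'), which
    # assumes Python-2-style byte strings for both the logo asset and the text.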
| 27.434783
| 68
| 0.59588
|
import os.path
import pysoup.utils.assets
class Logger(object):
def __init__(self, cwd):
self._log = ''
self._cwd = cwd
def log(self, text):
self._log += '{0}\n'.format(text)
def log_dependency_results(self, failed_dependencies):
for dependency in failed_dependencies:
self.log('could not install {0}'.format(dependency))
def dump_to_file(self, filename='soup.log'):
if self._log != '':
with open(os.path.join(self._cwd, filename), 'wb') as f:
f.write(pysoup.utils.assets.LOGO)
f.write('\n{0}'.format(self._log))
| true
| true
|
1c4655f9e7e6644dbd5ab06a55417c8f38cfdb63
| 18,981
|
py
|
Python
|
mindmeld/models/text_models.py
|
ritvikshrivastava/mindmeld
|
48eccac059439ea0f32fa3ac9079415bb006233b
|
[
"Apache-2.0"
] | 580
|
2019-03-24T20:59:09.000Z
|
2022-03-23T17:06:43.000Z
|
mindmeld/models/text_models.py
|
ritvikshrivastava/mindmeld
|
48eccac059439ea0f32fa3ac9079415bb006233b
|
[
"Apache-2.0"
] | 199
|
2019-04-30T18:15:46.000Z
|
2022-03-22T17:11:33.000Z
|
mindmeld/models/text_models.py
|
ritvikshrivastava/mindmeld
|
48eccac059439ea0f32fa3ac9079415bb006233b
|
[
"Apache-2.0"
] | 164
|
2019-04-25T08:27:28.000Z
|
2022-03-23T12:44:33.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains all code required to perform multinomial classification
of text.
"""
import logging
import operator
import os
import random
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectFromModel, SelectPercentile
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder as SKLabelEncoder
from sklearn.preprocessing import MaxAbsScaler, StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from .evaluation import EvaluatedExample, StandardModelEvaluation
from .helpers import (
CHAR_NGRAM_FREQ_RSC,
QUERY_FREQ_RSC,
WORD_FREQ_RSC,
WORD_NGRAM_FREQ_RSC,
)
from .model import ModelConfig, Model, PytorchModel
logger = logging.getLogger(__name__)
class TextModel(Model):
# classifier types
LOG_REG_TYPE = "logreg"
DECISION_TREE_TYPE = "dtree"
RANDOM_FOREST_TYPE = "rforest"
SVM_TYPE = "svm"
ALLOWED_CLASSIFIER_TYPES = [LOG_REG_TYPE, DECISION_TREE_TYPE, RANDOM_FOREST_TYPE, SVM_TYPE]
# default model scoring type
ACCURACY_SCORING = "accuracy"
_NEG_INF = -1e10
def __init__(self, config):
super().__init__(config)
self._class_encoder = SKLabelEncoder()
self._feat_vectorizer = DictVectorizer()
self._feat_selector = self._get_feature_selector()
self._feat_scaler = self._get_feature_scaler()
self._meta_type = None
self._meta_feat_vectorizer = DictVectorizer(sparse=False)
self._base_clfs = {}
self.cv_loss_ = None
self.train_acc_ = None
def __getstate__(self):
"""Returns the information needed pickle an instance of this class.
By default, pickling removes attributes with names starting with
underscores. This overrides that behavior.
"""
attributes = self.__dict__.copy()
attributes["_resources"] = {
rname: self._resources.get(rname, {})
for rname in [
WORD_FREQ_RSC,
QUERY_FREQ_RSC,
WORD_NGRAM_FREQ_RSC,
CHAR_NGRAM_FREQ_RSC,
]
}
return attributes
def _get_model_constructor(self):
"""Returns the class of the actual underlying model"""
classifier_type = self.config.model_settings["classifier_type"]
try:
return {
TextModel.LOG_REG_TYPE: LogisticRegression,
TextModel.DECISION_TREE_TYPE: DecisionTreeClassifier,
TextModel.RANDOM_FOREST_TYPE: RandomForestClassifier,
TextModel.SVM_TYPE: SVC,
}[classifier_type]
except KeyError as e:
msg = "{}: Classifier type {!r} not recognized"
raise ValueError(msg.format(self.__class__.__name__, classifier_type)) from e
def _get_cv_scorer(self, selection_settings):
"""
Returns the scorer to use based on the selection settings and classifier type,
defaulting to accuracy.
"""
return selection_settings.get("scoring", TextModel.ACCURACY_SCORING)
def select_params(self, examples, labels, selection_settings=None):
y = self._label_encoder.encode(labels)
X, y, groups = self.get_feature_matrix(examples, y, fit=True)
clf, params = self._fit_cv(X, y, groups, selection_settings)
self._clf = clf
return params
def _fit(self, examples, labels, params=None):
"""Trains a classifier without cross-validation.
Args:
examples (numpy.matrix): The feature matrix for a dataset.
labels (numpy.array): The target output values.
params (dict): Parameters of the classifier
"""
params = self._convert_params(params, labels, is_grid=False)
model_class = self._get_model_constructor()
params = self._clean_params(model_class, params)
return model_class(**params).fit(examples, labels)
def predict_log_proba(self, examples, dynamic_resource=None):
X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
predictions = self._predict_proba(X, self._clf.predict_log_proba)
# JSON can't reliably encode infinity, so replace it with large number
for row in predictions:
_, probas = row
for label, proba in probas.items():
if proba == -np.Infinity:
probas[label] = TextModel._NEG_INF
return predictions
def _get_feature_weight(self, feat_name, label_class):
"""Retrieves the feature weight from the coefficient matrix. If there are only two
classes, the feature vector is actually collapsed into one so we need some logic to
handle that case.
Args:
feat_name (str) : The feature name
label_class (int): The index of the label
Returns:
(ndarray float): The ndarray with a single float element
"""
if len(self._class_encoder.classes_) == 2 and label_class >= 1:
return np.array([0.0])
else:
return self._clf.coef_[
label_class, self._feat_vectorizer.vocabulary_[feat_name]
]
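    # Editorial note: for a binary problem scikit-learn stores a single
    # coefficient row, so the helper above reports a 0.0 weight for the second
    # class index instead of indexing a row that does not exist.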
def inspect(self, example, gold_label=None, dynamic_resource=None):
"""This class takes an example and returns a 2D list for every feature with feature
name, feature value, feature weight and their product for the predicted label. If gold
label is passed in, we will also include the feature value and weight for the gold
label and returns the log probability of the difference.
Args:
example (Query): The query to be predicted
gold_label (str): The gold label for this string
dynamic_resource (dict, optional): A dynamic resource to aid NLP inference
Returns:
(list of lists): A 2D array that includes every feature, their value, weight and \
probability
"""
if not isinstance(self._clf, LogisticRegression):
logging.warning(
"Currently inspection is only available for Logistic Regression Model"
)
return []
try:
gold_class = self._class_encoder.transform([gold_label])
except ValueError:
logger.warning("Unable to decode label `%s`", gold_label)
gold_class = None
pred_label = self.predict([example], dynamic_resource=dynamic_resource)[0]
pred_class = self._class_encoder.transform([pred_label])
features = self._extract_features(
example, dynamic_resource=dynamic_resource,
text_preparation_pipeline=self.text_preparation_pipeline
)
logging.info("Predicted: %s.", pred_label)
if gold_class is None:
columns = ["Feature", "Value", "Pred_W({0})".format(pred_label), "Pred_P"]
else:
columns = [
"Feature",
"Value",
"Pred_W({0})".format(pred_label),
"Pred_P",
"Gold_W({0})".format(gold_label),
"Gold_P",
"Diff",
]
logging.info("Gold: %s.", gold_label)
inspect_table = [columns]
# Get all active features sorted alphabetically by name
features = sorted(features.items(), key=operator.itemgetter(0))
for feature in features:
feat_name = feature[0]
feat_value = feature[1]
# Features we haven't seen before won't be in our vectorizer
# e.g., an exact match feature for a query we've never seen before
if feat_name not in self._feat_vectorizer.vocabulary_:
continue
weight = self._get_feature_weight(feat_name, pred_class)
product = feat_value * weight
if gold_class is None:
row = [
feat_name,
round(feat_value, 4),
weight.round(4),
product.round(4),
"-",
"-",
"-",
]
else:
gold_w = self._get_feature_weight(feat_name, gold_class)
gold_p = feat_value * gold_w
diff = gold_p - product
row = [
feat_name,
round(feat_value, 4),
weight.round(4),
product.round(4),
gold_w.round(4),
gold_p.round(4),
diff.round(4),
]
inspect_table.append(row)
return inspect_table
def _predict_proba(self, X, predictor):
predictions = []
for row in predictor(X):
probabilities = {}
top_class = None
for class_index, proba in enumerate(row):
raw_class = self._class_encoder.inverse_transform([class_index])[0]
decoded_class = self._label_encoder.decode([raw_class])[0]
probabilities[decoded_class] = proba
if proba > probabilities.get(top_class, -1.0):
top_class = decoded_class
predictions.append((top_class, probabilities))
return predictions
def get_feature_matrix(self, examples, y=None, fit=False, dynamic_resource=None):
"""Transforms a list of examples into a feature matrix.
Args:
examples (list): The examples.
Returns:
(tuple): tuple containing:
* (numpy.matrix): The feature matrix.
* (numpy.array): The group labels for examples.
"""
groups = []
feats = []
for idx, example in enumerate(examples):
feats.append(
self._extract_features(example, dynamic_resource, self.text_preparation_pipeline)
)
groups.append(idx)
X, y = self._preprocess_data(feats, y, fit=fit)
return X, y, groups
def _preprocess_data(self, X, y=None, fit=False):
if fit:
y = self._class_encoder.fit_transform(y)
X = self._feat_vectorizer.fit_transform(X)
if self._feat_scaler is not None:
X = self._feat_scaler.fit_transform(X)
if self._feat_selector is not None:
X = self._feat_selector.fit_transform(X, y)
else:
X = self._feat_vectorizer.transform(X)
if self._feat_scaler is not None:
X = self._feat_scaler.transform(X)
if self._feat_selector is not None:
X = self._feat_selector.transform(X)
return X, y
def _convert_params(self, param_grid, y, is_grid=True):
"""
Convert the params from the style given by the config to the style
passed in to the actual classifier.
Args:
param_grid (dict): lists of classifier parameter values, keyed by parameter name
Returns:
(dict): revised param_grid
"""
if "class_weight" in param_grid:
raw_weights = (
param_grid["class_weight"] if is_grid else [param_grid["class_weight"]]
)
weights = [
{
k
if isinstance(k, int)
else self._class_encoder.transform((k,))[0]: v
for k, v in cw_dict.items()
}
for cw_dict in raw_weights
]
param_grid["class_weight"] = weights if is_grid else weights[0]
elif "class_bias" in param_grid:
# interpolate between class_bias=0 => class_weight=None
# and class_bias=1 => class_weight='balanced'
class_count = np.bincount(y)
classes = self._class_encoder.classes_
weights = []
raw_bias = (
param_grid["class_bias"] if is_grid else [param_grid["class_bias"]]
)
for class_bias in raw_bias:
# these weights are same as sklearn's class_weight='balanced'
balanced_w = [(len(y) / len(classes) / c) for c in class_count]
balanced_tuples = list(zip(list(range(len(classes))), balanced_w))
weights.append(
{c: (1 - class_bias) + class_bias * w for c, w in balanced_tuples}
)
param_grid["class_weight"] = weights if is_grid else weights[0]
del param_grid["class_bias"]
return param_grid
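    # Editorial worked example for the class_bias interpolation above (values are
    # made up): with y = [0, 0, 0, 1] the class counts are [3, 1], so the
    # 'balanced' weights are len(y) / len(classes) / count = [4/2/3, 4/2/1],
    # roughly [0.67, 2.0]. A class_bias of 0.5 then yields about
    # {0: 0.5 + 0.5 * 0.67 = 0.83, 1: 0.5 + 0.5 * 2.0 = 1.5}.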
def _get_feature_selector(self):
"""Get a feature selector instance based on the feature_selector model
parameter
Returns:
(Object): a feature selector which returns a reduced feature matrix, \
given the full feature matrix, X and the class labels, y
"""
if self.config.model_settings is None:
selector_type = None
else:
selector_type = self.config.model_settings.get("feature_selector")
selector = {
"l1": SelectFromModel(LogisticRegression(penalty="l1", C=1)),
"f": SelectPercentile(),
}.get(selector_type)
return selector
def _get_feature_scaler(self):
"""Get a feature value scaler based on the model settings"""
if self.config.model_settings is None:
scale_type = None
else:
scale_type = self.config.model_settings.get("feature_scaler")
scaler = {
"std-dev": StandardScaler(with_mean=False),
"max-abs": MaxAbsScaler(),
}.get(scale_type)
return scaler
def evaluate(self, examples, labels):
"""Evaluates a model against the given examples and labels
Args:
examples: A list of examples to predict
labels: A list of expected labels
Returns:
ModelEvaluation: an object containing information about the \
evaluation
"""
# TODO: also expose feature weights?
predictions = self.predict_proba(examples)
# Create a model config object for the current effective config (after param selection)
config = self._get_effective_config()
evaluations = [
EvaluatedExample(
e, labels[i], predictions[i][0], predictions[i][1], config.label_type
)
for i, e in enumerate(examples)
]
model_eval = StandardModelEvaluation(config, evaluations)
return model_eval
def fit(self, examples, labels, params=None):
"""Trains this model.
This method inspects instance attributes to determine the classifier
object and cross-validation strategy, and then fits the model to the
training examples passed in.
Args:
examples (ProcessedQueryList.*Iterator): A list of examples.
labels (ProcessedQueryList.*Iterator): A parallel list to examples. The gold labels
for each example.
params (dict, optional): Parameters to use when training. Parameter
selection will be bypassed if this is provided
Returns:
(TextModel): Returns self to match classifier scikit-learn \
interfaces.
"""
params = params or self.config.params
skip_param_selection = params is not None or self.config.param_selection is None
# Shuffle to prevent order effects
indices = list(range(len(labels)))
random.shuffle(indices)
examples.reorder(indices)
labels.reorder(indices)
distinct_labels = set(labels)
if len(set(distinct_labels)) <= 1:
return self
# Extract features and classes
y = self._label_encoder.encode(labels)
X, y, groups = self.get_feature_matrix(examples, y, fit=True)
if skip_param_selection:
self._clf = self._fit(X, y, params)
self._current_params = params
else:
# run cross validation to select params
best_clf, best_params = self._fit_cv(X, y, groups)
self._clf = best_clf
self._current_params = best_params
return self
def predict(self, examples, dynamic_resource=None):
X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
y = self._clf.predict(X)
predictions = self._class_encoder.inverse_transform(y)
return self._label_encoder.decode(predictions)
def predict_proba(self, examples, dynamic_resource=None):
X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
return self._predict_proba(X, self._clf.predict_proba)
def view_extracted_features(self, example, dynamic_resource=None):
return self._extract_features(
example, dynamic_resource=dynamic_resource,
text_preparation_pipeline=self.text_preparation_pipeline
)
@classmethod
def load(cls, path):
metadata = joblib.load(path)
        # backwards compatibility check for RoleClassifiers
if isinstance(metadata, dict):
return metadata["model"]
# in this case, metadata = model which was serialized and dumped
return metadata
def _dump(self, path):
os.makedirs(os.path.dirname(path), exist_ok=True)
joblib.dump(self, path)
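# Editorial persistence sketch (the path below is a placeholder): a trained
# TextModel round-trips through joblib. _dump() serialises the whole model
# object, and load() reads it back, unwrapping the legacy {'model': ...} dict
# format written by older role classifiers.
#     model._dump('/tmp/intent_model.pkl')
#     restored = TextModel.load('/tmp/intent_model.pkl')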
class PytorchTextModel(PytorchModel):
ALLOWED_CLASSIFIER_TYPES = ["embedder", "cnn", "lstm"]
pass
class AutoTextModel:
@staticmethod
def get_model_class(config: ModelConfig):
CLASSES = [TextModel, PytorchTextModel]
classifier_type = config.model_settings["classifier_type"]
for _class in CLASSES:
if classifier_type in _class.ALLOWED_CLASSIFIER_TYPES:
return _class
msg = f"Invalid 'classifier_type': {classifier_type}. " \
f"Allowed types are: {[_class.ALLOWED_CLASSIFIER_TYPES for _class in CLASSES]}"
raise ValueError(msg)
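# ---------------------------------------------------------------------------
# Editorial dispatch sketch (not part of the original module): get_model_class
# only reads config.model_settings, so a stand-in object is enough to show the
# routing; a real caller would pass a ModelConfig.
if __name__ == '__main__':
    from types import SimpleNamespace

    cfg = SimpleNamespace(model_settings={'classifier_type': 'logreg'})
    assert AutoTextModel.get_model_class(cfg) is TextModel
    cfg = SimpleNamespace(model_settings={'classifier_type': 'cnn'})
    assert AutoTextModel.get_model_class(cfg) is PytorchTextModel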
| 37
| 97
| 0.61393
|
import logging
import operator
import os
import random
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectFromModel, SelectPercentile
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder as SKLabelEncoder
from sklearn.preprocessing import MaxAbsScaler, StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from .evaluation import EvaluatedExample, StandardModelEvaluation
from .helpers import (
CHAR_NGRAM_FREQ_RSC,
QUERY_FREQ_RSC,
WORD_FREQ_RSC,
WORD_NGRAM_FREQ_RSC,
)
from .model import ModelConfig, Model, PytorchModel
logger = logging.getLogger(__name__)
class TextModel(Model):
LOG_REG_TYPE = "logreg"
DECISION_TREE_TYPE = "dtree"
RANDOM_FOREST_TYPE = "rforest"
SVM_TYPE = "svm"
ALLOWED_CLASSIFIER_TYPES = [LOG_REG_TYPE, DECISION_TREE_TYPE, RANDOM_FOREST_TYPE, SVM_TYPE]
ACCURACY_SCORING = "accuracy"
_NEG_INF = -1e10
def __init__(self, config):
super().__init__(config)
self._class_encoder = SKLabelEncoder()
self._feat_vectorizer = DictVectorizer()
self._feat_selector = self._get_feature_selector()
self._feat_scaler = self._get_feature_scaler()
self._meta_type = None
self._meta_feat_vectorizer = DictVectorizer(sparse=False)
self._base_clfs = {}
self.cv_loss_ = None
self.train_acc_ = None
def __getstate__(self):
attributes = self.__dict__.copy()
attributes["_resources"] = {
rname: self._resources.get(rname, {})
for rname in [
WORD_FREQ_RSC,
QUERY_FREQ_RSC,
WORD_NGRAM_FREQ_RSC,
CHAR_NGRAM_FREQ_RSC,
]
}
return attributes
def _get_model_constructor(self):
classifier_type = self.config.model_settings["classifier_type"]
try:
return {
TextModel.LOG_REG_TYPE: LogisticRegression,
TextModel.DECISION_TREE_TYPE: DecisionTreeClassifier,
TextModel.RANDOM_FOREST_TYPE: RandomForestClassifier,
TextModel.SVM_TYPE: SVC,
}[classifier_type]
except KeyError as e:
msg = "{}: Classifier type {!r} not recognized"
raise ValueError(msg.format(self.__class__.__name__, classifier_type)) from e
def _get_cv_scorer(self, selection_settings):
return selection_settings.get("scoring", TextModel.ACCURACY_SCORING)
def select_params(self, examples, labels, selection_settings=None):
y = self._label_encoder.encode(labels)
X, y, groups = self.get_feature_matrix(examples, y, fit=True)
clf, params = self._fit_cv(X, y, groups, selection_settings)
self._clf = clf
return params
def _fit(self, examples, labels, params=None):
params = self._convert_params(params, labels, is_grid=False)
model_class = self._get_model_constructor()
params = self._clean_params(model_class, params)
return model_class(**params).fit(examples, labels)
def predict_log_proba(self, examples, dynamic_resource=None):
X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
predictions = self._predict_proba(X, self._clf.predict_log_proba)
for row in predictions:
_, probas = row
for label, proba in probas.items():
if proba == -np.Infinity:
probas[label] = TextModel._NEG_INF
return predictions
def _get_feature_weight(self, feat_name, label_class):
if len(self._class_encoder.classes_) == 2 and label_class >= 1:
return np.array([0.0])
else:
return self._clf.coef_[
label_class, self._feat_vectorizer.vocabulary_[feat_name]
]
def inspect(self, example, gold_label=None, dynamic_resource=None):
if not isinstance(self._clf, LogisticRegression):
logging.warning(
"Currently inspection is only available for Logistic Regression Model"
)
return []
try:
gold_class = self._class_encoder.transform([gold_label])
except ValueError:
logger.warning("Unable to decode label `%s`", gold_label)
gold_class = None
pred_label = self.predict([example], dynamic_resource=dynamic_resource)[0]
pred_class = self._class_encoder.transform([pred_label])
features = self._extract_features(
example, dynamic_resource=dynamic_resource,
text_preparation_pipeline=self.text_preparation_pipeline
)
logging.info("Predicted: %s.", pred_label)
if gold_class is None:
columns = ["Feature", "Value", "Pred_W({0})".format(pred_label), "Pred_P"]
else:
columns = [
"Feature",
"Value",
"Pred_W({0})".format(pred_label),
"Pred_P",
"Gold_W({0})".format(gold_label),
"Gold_P",
"Diff",
]
logging.info("Gold: %s.", gold_label)
inspect_table = [columns]
# Get all active features sorted alphabetically by name
features = sorted(features.items(), key=operator.itemgetter(0))
for feature in features:
feat_name = feature[0]
feat_value = feature[1]
# Features we haven't seen before won't be in our vectorizer
# e.g., an exact match feature for a query we've never seen before
if feat_name not in self._feat_vectorizer.vocabulary_:
continue
weight = self._get_feature_weight(feat_name, pred_class)
product = feat_value * weight
if gold_class is None:
row = [
feat_name,
round(feat_value, 4),
weight.round(4),
product.round(4),
"-",
"-",
"-",
]
else:
gold_w = self._get_feature_weight(feat_name, gold_class)
gold_p = feat_value * gold_w
diff = gold_p - product
row = [
feat_name,
round(feat_value, 4),
weight.round(4),
product.round(4),
gold_w.round(4),
gold_p.round(4),
diff.round(4),
]
inspect_table.append(row)
return inspect_table
def _predict_proba(self, X, predictor):
predictions = []
for row in predictor(X):
probabilities = {}
top_class = None
for class_index, proba in enumerate(row):
raw_class = self._class_encoder.inverse_transform([class_index])[0]
decoded_class = self._label_encoder.decode([raw_class])[0]
probabilities[decoded_class] = proba
if proba > probabilities.get(top_class, -1.0):
top_class = decoded_class
predictions.append((top_class, probabilities))
return predictions
def get_feature_matrix(self, examples, y=None, fit=False, dynamic_resource=None):
groups = []
feats = []
for idx, example in enumerate(examples):
feats.append(
self._extract_features(example, dynamic_resource, self.text_preparation_pipeline)
)
groups.append(idx)
X, y = self._preprocess_data(feats, y, fit=fit)
return X, y, groups
def _preprocess_data(self, X, y=None, fit=False):
if fit:
y = self._class_encoder.fit_transform(y)
X = self._feat_vectorizer.fit_transform(X)
if self._feat_scaler is not None:
X = self._feat_scaler.fit_transform(X)
if self._feat_selector is not None:
X = self._feat_selector.fit_transform(X, y)
else:
X = self._feat_vectorizer.transform(X)
if self._feat_scaler is not None:
X = self._feat_scaler.transform(X)
if self._feat_selector is not None:
X = self._feat_selector.transform(X)
return X, y
def _convert_params(self, param_grid, y, is_grid=True):
if "class_weight" in param_grid:
raw_weights = (
param_grid["class_weight"] if is_grid else [param_grid["class_weight"]]
)
weights = [
{
k
if isinstance(k, int)
else self._class_encoder.transform((k,))[0]: v
for k, v in cw_dict.items()
}
for cw_dict in raw_weights
]
param_grid["class_weight"] = weights if is_grid else weights[0]
elif "class_bias" in param_grid:
class_count = np.bincount(y)
classes = self._class_encoder.classes_
weights = []
raw_bias = (
param_grid["class_bias"] if is_grid else [param_grid["class_bias"]]
)
for class_bias in raw_bias:
balanced_w = [(len(y) / len(classes) / c) for c in class_count]
balanced_tuples = list(zip(list(range(len(classes))), balanced_w))
weights.append(
{c: (1 - class_bias) + class_bias * w for c, w in balanced_tuples}
)
param_grid["class_weight"] = weights if is_grid else weights[0]
del param_grid["class_bias"]
return param_grid
def _get_feature_selector(self):
if self.config.model_settings is None:
selector_type = None
else:
selector_type = self.config.model_settings.get("feature_selector")
selector = {
"l1": SelectFromModel(LogisticRegression(penalty="l1", C=1)),
"f": SelectPercentile(),
}.get(selector_type)
return selector
def _get_feature_scaler(self):
if self.config.model_settings is None:
scale_type = None
else:
scale_type = self.config.model_settings.get("feature_scaler")
scaler = {
"std-dev": StandardScaler(with_mean=False),
"max-abs": MaxAbsScaler(),
}.get(scale_type)
return scaler
def evaluate(self, examples, labels):
# TODO: also expose feature weights?
predictions = self.predict_proba(examples)
# Create a model config object for the current effective config (after param selection)
config = self._get_effective_config()
evaluations = [
EvaluatedExample(
e, labels[i], predictions[i][0], predictions[i][1], config.label_type
)
for i, e in enumerate(examples)
]
model_eval = StandardModelEvaluation(config, evaluations)
return model_eval
def fit(self, examples, labels, params=None):
params = params or self.config.params
skip_param_selection = params is not None or self.config.param_selection is None
# Shuffle to prevent order effects
indices = list(range(len(labels)))
random.shuffle(indices)
examples.reorder(indices)
labels.reorder(indices)
distinct_labels = set(labels)
if len(set(distinct_labels)) <= 1:
return self
# Extract features and classes
y = self._label_encoder.encode(labels)
X, y, groups = self.get_feature_matrix(examples, y, fit=True)
if skip_param_selection:
self._clf = self._fit(X, y, params)
self._current_params = params
else:
# run cross validation to select params
best_clf, best_params = self._fit_cv(X, y, groups)
self._clf = best_clf
self._current_params = best_params
return self
def predict(self, examples, dynamic_resource=None):
X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
y = self._clf.predict(X)
predictions = self._class_encoder.inverse_transform(y)
return self._label_encoder.decode(predictions)
def predict_proba(self, examples, dynamic_resource=None):
X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
return self._predict_proba(X, self._clf.predict_proba)
def view_extracted_features(self, example, dynamic_resource=None):
return self._extract_features(
example, dynamic_resource=dynamic_resource,
text_preparation_pipeline=self.text_preparation_pipeline
)
@classmethod
def load(cls, path):
metadata = joblib.load(path)
        # backwards compatibility check for RoleClassifiers
if isinstance(metadata, dict):
return metadata["model"]
# in this case, metadata = model which was serialized and dumped
return metadata
def _dump(self, path):
os.makedirs(os.path.dirname(path), exist_ok=True)
joblib.dump(self, path)
class PytorchTextModel(PytorchModel):
ALLOWED_CLASSIFIER_TYPES = ["embedder", "cnn", "lstm"]
pass
class AutoTextModel:
@staticmethod
def get_model_class(config: ModelConfig):
CLASSES = [TextModel, PytorchTextModel]
classifier_type = config.model_settings["classifier_type"]
for _class in CLASSES:
if classifier_type in _class.ALLOWED_CLASSIFIER_TYPES:
return _class
msg = f"Invalid 'classifier_type': {classifier_type}. " \
f"Allowed types are: {[_class.ALLOWED_CLASSIFIER_TYPES for _class in CLASSES]}"
raise ValueError(msg)
| true
| true
|
1c465740ae5fe9f566269cf6b2d71d8bc9882dcb
| 28,276
|
py
|
Python
|
Core/Python/invoke_refresh_inventory.py
|
prasadrao-dell/OpenManage-Enterprise
|
f9bd0e821701902d6571a54663a7c9ef4f2308b3
|
[
"Apache-2.0"
] | 1
|
2020-07-18T13:05:48.000Z
|
2020-07-18T13:05:48.000Z
|
Core/Python/invoke_refresh_inventory.py
|
prasadrao-dell/OpenManage-Enterprise
|
f9bd0e821701902d6571a54663a7c9ef4f2308b3
|
[
"Apache-2.0"
] | 11
|
2020-07-22T07:33:14.000Z
|
2020-08-20T12:01:55.000Z
|
Core/Python/invoke_refresh_inventory.py
|
prasadrao-dell/OpenManage-Enterprise
|
f9bd0e821701902d6571a54663a7c9ef4f2308b3
|
[
"Apache-2.0"
] | 4
|
2020-06-03T11:38:34.000Z
|
2020-08-11T10:38:57.000Z
|
#
# _author_ = Grant Curell <grant_curell@dell.com>
#
# Copyright (c) 2020 Dell EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
#### Synopsis
Refreshes the inventory on a set of target devices. This includes the configuration inventory tab.
#### Description
This script uses the OME REST API to refresh the inventory of a targeted server. It performs X-Auth
with basic authentication. Note: Credentials are not stored on disk.
#### Python Example
`python invoke_refresh_inventory.py -i 192.168.1.93 -u admin -p somepass --idrac-ips 192.168.1.63,192.168.1.45`
"""
import argparse
import json
import sys
import time
from argparse import RawTextHelpFormatter
from pprint import pprint
from urllib.parse import urlparse
from getpass import getpass
try:
import urllib3
import requests
except ModuleNotFoundError:
print("This program requires urllib3 and requests. To install them on most systems run `pip install requests"
"urllib3`")
sys.exit(0)
def authenticate(ome_ip_address: str, ome_username: str, ome_password: str) -> dict:
"""
Authenticates with OME and creates a session
Args:
ome_ip_address: IP address of the OME server
ome_username: Username for OME
ome_password: OME password
Returns: A dictionary of HTTP headers
Raises:
Exception: A generic exception in the event of a failure to connect.
"""
authenticated_headers = {'content-type': 'application/json'}
session_url = 'https://%s/api/SessionService/Sessions' % ome_ip_address
user_details = {'UserName': ome_username,
'Password': ome_password,
'SessionType': 'API'}
try:
session_info = requests.post(session_url, verify=False,
data=json.dumps(user_details),
headers=authenticated_headers)
except requests.exceptions.ConnectionError:
print("Failed to connect to OME. This typically indicates a network connectivity problem. Can you ping OME?")
sys.exit(0)
if session_info.status_code == 201:
authenticated_headers['X-Auth-Token'] = session_info.headers['X-Auth-Token']
return authenticated_headers
print("There was a problem authenticating with OME. Are you sure you have the right username, password, "
"and IP?")
raise Exception("There was a problem authenticating with OME. Are you sure you have the right username, "
"password, and IP?")
def get_group_id_by_name(ome_ip_address: str, group_name: str, authenticated_headers: dict) -> int:
"""
Retrieves the ID of a group given its name.
Args:
ome_ip_address: The IP address of the OME server
group_name: The name of the group whose ID you want to resolve.
authenticated_headers: Headers used for authentication to the OME server
Returns: Returns the ID of the group as an integer or -1 if it couldn't be found.
"""
print("Searching for the requested group.")
groups_url = "https://%s/api/GroupService/Groups?$filter=Name eq '%s'" % (ome_ip_address, group_name)
group_response = requests.get(groups_url, headers=authenticated_headers, verify=False)
if group_response.status_code == 200:
json_data = json.loads(group_response.content)
if json_data['@odata.count'] > 1:
print("WARNING: We found more than one name that matched the group name: " + group_name +
". We are picking the first entry.")
if json_data['@odata.count'] == 1 or json_data['@odata.count'] > 1:
group_id = json_data['value'][0]['Id']
if not isinstance(group_id, int):
print("The server did not return an integer ID. Something went wrong.")
return -1
return group_id
print("Error: We could not find the group " + group_name + ". Exiting.")
return -1
print("Unable to retrieve groups. Exiting.")
return -1
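# Illustrative usage sketch (not part of the original script): resolving the default "All Devices"
# group to an ID. get_group_id_by_name() returns -1 on any failure, so the result should be
# checked before it is used as a job target.
#
#     group_id = get_group_id_by_name("192.168.1.93", "All Devices", headers)
#     if group_id == -1:
#         sys.exit(0)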
def get_data(authenticated_headers: dict, url: str, odata_filter: str = None, max_pages: int = None) -> dict:
"""
This function retrieves data from a specified URL. Get requests from OME return paginated data. The code below
handles pagination. This is the equivalent in the UI of a list of results that require you to go to different
pages to get a complete listing.
Args:
authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
url: The API url against which you would like to make a request
odata_filter: An optional parameter for providing an odata filter to run against the API endpoint.
max_pages: The maximum number of pages you would like to return
Returns: Returns a dictionary of data received from OME
"""
next_link_url = None
if odata_filter:
count_data = requests.get(url + '?$filter=' + odata_filter, headers=authenticated_headers, verify=False)
if count_data.status_code == 400:
print("Received an error while retrieving data from %s:" % url + '?$filter=' + odata_filter)
pprint(count_data.json()['error'])
return {}
count_data = count_data.json()
if count_data['@odata.count'] <= 0:
print("No results found!")
return {}
else:
count_data = requests.get(url, headers=authenticated_headers, verify=False).json()
if 'value' in count_data:
data = count_data['value']
else:
data = count_data
if '@odata.nextLink' in count_data:
# Grab the base URI
next_link_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url)) + count_data['@odata.nextLink']
i = 1
while next_link_url is not None:
# Break if we have reached the maximum number of pages to be returned
if max_pages:
if i >= max_pages:
break
else:
i = i + 1
response = requests.get(next_link_url, headers=authenticated_headers, verify=False)
next_link_url = None
if response.status_code == 200:
requested_data = response.json()
if requested_data['@odata.count'] <= 0:
print("No results found!")
return {}
# The @odata.nextLink key is only present in data if there are additional pages. We check for it and if it
# is present we get a link to the page with the next set of results.
if '@odata.nextLink' in requested_data:
next_link_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url)) + \
requested_data['@odata.nextLink']
if 'value' in requested_data:
data += requested_data['value']
else:
data += requested_data
else:
print("Unknown error occurred. Received HTTP response code: " + str(response.status_code) +
" with error: " + response.text)
raise Exception("Unknown error occurred. Received HTTP response code: " + str(response.status_code)
+ " with error: " + response.text)
return data
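# Illustrative usage sketch (not part of the original script): get_data() hides OME's pagination
# behind one call. The OData filter below reuses the DeviceServiceTag field that get_device_id()
# also filters on; the service tag value itself is a placeholder.
#
#     devices = get_data(headers, "https://192.168.1.93/api/DeviceService/Devices",
#                        odata_filter="DeviceServiceTag eq 'ABC1234'")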
def track_job_to_completion(ome_ip_address: str,
authenticated_headers: dict,
tracked_job_id,
max_retries: int = 20,
sleep_interval: int = 30) -> bool:
"""
Tracks a job to either completion or a failure within the job.
Args:
ome_ip_address: The IP address of the OME server
authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
tracked_job_id: The ID of the job which you would like to track
max_retries: The maximum number of times the function should contact the server to see if the job has completed
sleep_interval: The frequency with which the function should check the server for job completion
Returns: True if the job completed successfully or completed with errors. Returns false if the job failed.
"""
job_status_map = {
"2020": "Scheduled",
"2030": "Queued",
"2040": "Starting",
"2050": "Running",
"2060": "Completed",
"2070": "Failed",
"2090": "Warning",
"2080": "New",
"2100": "Aborted",
"2101": "Paused",
"2102": "Stopped",
"2103": "Canceled"
}
failed_job_status = [2070, 2090, 2100, 2101, 2102, 2103]
job_url = 'https://%s/api/JobService/Jobs(%s)' % (ome_ip_address, tracked_job_id)
loop_ctr = 0
job_incomplete = True
print("Polling %s to completion ..." % tracked_job_id)
while loop_ctr < max_retries:
loop_ctr += 1
time.sleep(sleep_interval)
job_resp = requests.get(job_url, headers=authenticated_headers, verify=False)
if job_resp.status_code == 200:
job_status = str((job_resp.json())['LastRunStatus']['Id'])
job_status_str = job_status_map[job_status]
print("Iteration %s: Status of %s is %s" % (loop_ctr, tracked_job_id, job_status_str))
if int(job_status) == 2060:
job_incomplete = False
print("Job completed successfully!")
break
elif int(job_status) in failed_job_status:
job_incomplete = True
if job_status_str == "Warning":
print("Completed with errors")
else:
print("Error: Job failed.")
job_hist_url = str(job_url) + "/ExecutionHistories"
job_hist_resp = requests.get(job_hist_url, headers=authenticated_headers, verify=False)
if job_hist_resp.status_code == 200:
# Get the job's execution details
job_history_id = str((job_hist_resp.json())['value'][0]['Id'])
execution_hist_detail = "(" + job_history_id + ")/ExecutionHistoryDetails"
job_hist_det_url = str(job_hist_url) + execution_hist_detail
job_hist_det_resp = requests.get(job_hist_det_url,
headers=authenticated_headers,
verify=False)
if job_hist_det_resp.status_code == 200:
pprint(job_hist_det_resp.json()['value'])
else:
print("Unable to parse job execution history... exiting")
break
else:
print("Unable to poll status of %s - Iteration %s " % (tracked_job_id, loop_ctr))
if job_incomplete:
print("Job %s incomplete after polling %s times...Check status" % (tracked_job_id, max_retries))
return False
return True
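# Illustrative usage sketch (not part of the original script): polling a job created via a POST
# to /api/JobService/Jobs. 'job_id' stands in for the "Id" field of the created job.
#
#     if track_job_to_completion("192.168.1.93", headers, job_id, max_retries=10, sleep_interval=15):
#         print("Job finished.")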
def get_device_id(authenticated_headers: dict,
ome_ip_address: str,
service_tag: str = None,
device_idrac_ip: str = None,
device_name: str = None) -> int:
"""
Resolves a service tag, idrac IP or device name to a device ID
Args:
authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
ome_ip_address: IP address of the OME server
service_tag: (optional) The service tag of a host
device_idrac_ip: (optional) The idrac IP of a host
device_name: (optional): The name of a host
Returns: Returns the device ID or -1 if it couldn't be found
"""
if not service_tag and not device_idrac_ip and not device_name:
print("No argument provided to get_device_id. Must provide service tag, device idrac IP or device name.")
return -1
# If the user passed a device name, resolve that name to a device ID
if device_name:
device_id = get_data(authenticated_headers, "https://%s/api/DeviceService/Devices" % ome_ip_address,
"DeviceName eq \'%s\'" % device_name)
if len(device_id) == 0:
print("Error: We were unable to find device name " + device_name + " on this OME server. Exiting.")
return -1
device_id = device_id[0]['Id']
elif service_tag:
device_id = get_data(authenticated_headers, "https://%s/api/DeviceService/Devices" % ome_ip_address,
"DeviceServiceTag eq \'%s\'" % service_tag)
if len(device_id) == 0:
print("Error: We were unable to find service tag " + service_tag + " on this OME server. Exiting.")
return -1
device_id = device_id[0]['Id']
elif device_idrac_ip:
device_id = -1
device_ids = get_data(authenticated_headers, "https://%s/api/DeviceService/Devices" % ome_ip_address,
"DeviceManagement/any(d:d/NetworkAddress eq '%s')" % device_idrac_ip)
if len(device_ids) == 0:
print("Error: We were unable to find idrac IP " + device_idrac_ip + " on this OME server. Exiting.")
return -1
# TODO - This is necessary because the filter above could possibly return multiple results
# TODO - See https://github.com/dell/OpenManage-Enterprise/issues/87
for device_id in device_ids:
if device_id['DeviceManagement'][0]['NetworkAddress'] == device_idrac_ip:
device_id = device_id['Id']
if device_id == -1:
print("Error: We were unable to find idrac IP " + device_idrac_ip + " on this OME server. Exiting.")
return -1
else:
device_id = -1
return device_id
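# Illustrative usage sketch (not part of the original script): the three lookup styles supported
# by get_device_id(). The service tag, idrac IP, and device name are placeholders; each call
# returns -1 if the device cannot be found.
#
#     by_tag = get_device_id(headers, "192.168.1.93", service_tag="ABC1234")
#     by_ip = get_device_id(headers, "192.168.1.93", device_idrac_ip="192.168.1.63")
#     by_name = get_device_id(headers, "192.168.1.93", device_name="my-server")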
def refresh_device_inventory(authenticated_headers: dict,
ome_ip_address: str,
group_name: str,
skip_config_inventory: bool,
device_ids: list = None,
service_tags: str = None,
device_idrac_ips: str = None,
device_names: str = None,
ignore_group: bool = False):
"""
Refresh the inventory of targeted hosts
Args:
authenticated_headers: A dictionary of HTTP headers generated from an authenticated session with OME
ome_ip_address: IP address of the OME server
group_name: The name of the group which contains the servers whose inventories you want to refresh
skip_config_inventory: A boolean defining whether you would like to skip gathering the config inventory
device_ids: (optional) The device ID of a host whose inventory you want to refresh
service_tags: (optional) The service tag of a host whose inventory you want to refresh
device_idrac_ips: (optional) The idrac IP of a host whose inventory you want to refresh
device_names: (optional): The name of a host whose inventory you want to refresh
ignore_group: (optional): Controls whether you want to ignore using groups or not
"""
jobs_url = "https://%s/api/JobService/Jobs" % ome_ip_address
target_ids = []
    if service_tags:
        if isinstance(service_tags, str):
            service_tags = service_tags.split(',')
        for service_tag in service_tags:
            target = get_device_id(authenticated_headers, ome_ip_address, service_tag=service_tag)
            if target != -1:
                target_ids.append(target)
            else:
                print("Could not resolve ID for: " + service_tag)
    if device_idrac_ips:
        if isinstance(device_idrac_ips, str):
            device_idrac_ips = device_idrac_ips.split(',')
        for device_idrac_ip in device_idrac_ips:
            target = get_device_id(authenticated_headers, ome_ip_address, device_idrac_ip=device_idrac_ip)
            if target != -1:
                target_ids.append(target)
            else:
                print("Could not resolve ID for: " + device_idrac_ip)
    if device_names:
        if isinstance(device_names, str):
            device_names = device_names.split(',')
        for device_name in device_names:
            target = get_device_id(authenticated_headers, ome_ip_address, device_name=device_name)
            if target != -1:
                target_ids.append(target)
            else:
                print("Could not resolve ID for: " + device_name)
if device_ids:
for device_id in device_ids:
target_ids.append(device_id)
if not skip_config_inventory:
group_id = get_group_id_by_name(ome_ip_address, group_name, authenticated_headers)
if group_id == -1:
print("We were unable to find the ID for group name " + group_name + " ... exiting.")
sys.exit(0)
if not ignore_group:
            group_devices = get_data(authenticated_headers, "https://%s/api/GroupService/Groups(%s)/Devices" % (ome_ip_address, group_id))
            if len(group_devices) < 1:
                print("Error: There was a problem retrieving the devices for group " + group_name + ". Exiting")
sys.exit(0)
for device in group_devices:
target_ids.append(device['Id'])
targets_payload = []
for id_to_refresh in target_ids:
targets_payload.append({
"Id": id_to_refresh,
"Data": "",
"TargetType": {
"Id": 1000,
"Name": "DEVICE"
}
})
payload = {
"Id": 0,
"JobName": "Inventory refresh via the API.",
"JobDescription": "Refreshes the inventories for targeted hardware.",
"Schedule": "startnow",
"State": "Enabled",
"JobType": {
"Name": "Inventory_Task"
},
"Targets": targets_payload
}
print("Beginning standard inventory refresh...")
create_resp = requests.post(jobs_url, headers=authenticated_headers, verify=False, data=json.dumps(payload))
if create_resp.status_code == 201:
job_id_generic_refresh = json.loads(create_resp.content)["Id"]
else:
print("Error: Failed to refresh inventory. We aren't sure what went wrong.")
sys.exit(1)
if job_id_generic_refresh is None:
print("Received invalid job ID from OME for standard inventory. Exiting.")
sys.exit(1)
# ------------------------------------------------------
if not skip_config_inventory:
payload = {
"JobDescription": "Run config inventory collection task on selected devices",
"JobName": "Part 1 - API refresh config inventory",
"JobType": {"Id": 50, "Name": "Device_Config_Task"},
"Params": [{"Key": "action", "Value": "CONFIG_INVENTORY"}],
"Schedule": "startnow",
"StartTime": "",
"State": "Enabled",
"Targets": [{
"Data": "",
"Id": group_id,
"JobId": -1,
"TargetType": {"Id": 6000, "Name": "GROUP"}
}]
}
print("Beginning part 1 of 2 of the configuration inventory refresh.")
create_resp = requests.post(jobs_url, headers=authenticated_headers, verify=False, data=json.dumps(payload))
if create_resp.status_code == 201:
config_inventory_refresh_job_1 = json.loads(create_resp.content)["Id"]
else:
print("Error: Failed to refresh inventory. We aren't sure what went wrong.")
sys.exit(1)
if config_inventory_refresh_job_1 is None:
print("Received invalid job ID from OME for part 1 of configuration inventory refresh... exiting.")
sys.exit(1)
print("Waiting for part 1 of configuration inventory refresh to finish. This could take a couple of minutes.")
if track_job_to_completion(ome_ip_address, authenticated_headers, config_inventory_refresh_job_1):
print("Part 1 of configuration inventory refresh completed successfully.")
else:
print("Something went wrong. See text output above for more details.")
# ------------------------------------------------------
payload = {
"JobDescription": "Create Inventory",
"JobName": "Part 2 - API refresh config inventory",
"JobType": {"Id": 8, "Name": "Inventory_Task"},
"Params": [
{"Key": "action", "Value": "CONFIG_INVENTORY"},
{"Key": "isCollectDriverInventory", "Value": "true"}],
"Schedule": "startnow",
"StartTime": "",
"State": "Enabled",
"Targets": [{
"Data": "",
"Id": group_id,
"JobId": -1,
"TargetType": {"Id": 6000, "Name": "GROUP"}
}]
}
print("Beginning part 2 of 2 of the configuration inventory refresh")
create_resp = requests.post(jobs_url, headers=authenticated_headers, verify=False, data=json.dumps(payload))
if create_resp.status_code == 201:
config_inventory_refresh_job_2 = json.loads(create_resp.content)["Id"]
else:
print("Error: Failed to refresh inventory. We aren't sure what went wrong.")
sys.exit(1)
if config_inventory_refresh_job_2 is None:
print("Received invalid job ID from OME for part 2 of the configuration inventory refresh... exiting.")
sys.exit(1)
print("Waiting for part 2 of the configuration inventory refresh to finish. "
"This could take a couple of minutes.")
if track_job_to_completion(ome_ip_address, authenticated_headers, config_inventory_refresh_job_2):
print("Inventory refresh completed successfully.")
else:
print("Something went wrong. See text output above for more details.")
print("Tracking standard inventory to completion.")
if track_job_to_completion(ome_ip_address, authenticated_headers, job_id_generic_refresh):
print("Inventory refresh completed successfully.")
else:
print("Something went wrong. See text output above for more details.")
print("Inventory refresh complete!")
if __name__ == '__main__':
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("--ip", "-i", required=True, help="OME Appliance IP")
parser.add_argument("--user", "-u", required=False,
help="Username for the OME Appliance", default="admin")
parser.add_argument("--password", "-p", required=False,
help="Password for the OME Appliance")
parser.add_argument("--groupname", "-g", required=False, default="All Devices",
help="The name of the group containing the devices whose inventory you want to refresh. "
"Defaults to all devices. Due to the way the API functions, if you want to refresh the "
"configuration inventory, you must have all applicable devices in a group. The "
"configuration inventory is specific to the tab called \"Configuration Inventory\" under "
"a device's view. You can use the create_static_group and add_device_to_static group "
"modules to do this programmatically.")
parser.add_argument("--device-ids", "-d", help="A comma separated list of device-ids to refresh. Applies to "
"regular inventory only. This does not impact the configuration "
"inventory tab. That is controlled by the group name.")
parser.add_argument("--service-tags", "-s", help="A comma separated list of service tags to refresh. Applies to "
"regular inventory only. This does not impact the configuration "
"inventory tab. That is controlled by the group name.")
parser.add_argument("--idrac-ips", "-r", help="A comma separated list of idrac IPs to refresh. Applies to regular "
"inventory only. This does not impact the configuration inventory "
"tab. That is controlled by the group name.")
parser.add_argument("--device-names", "-n", help="A comma separated list of device names to refresh. Applies to "
"regular inventory only. This does not impact the configuration "
"inventory tab. That is controlled by the group name.")
parser.add_argument("--skip-config-inventory", "-skip", default=False, action='store_true',
help="The configuration inventory is the inventory you see specifically under the tab for a"
" specific device. In order to obtain a config inventory that server must be part of a"
" group or you have to run an inventory update against all devices which can be time "
"consuming. A regular inventory run will update things like firmware assuming that the"
" version change is reflected in idrac. A config inventory is launched in the GUI by "
"clicking \"Run inventory\" on quick links on the devices page. A regular inventory is "
"the same as clicking \"Run inventory\" on a specific device\'s page.")
parser.add_argument("--ignore-group", default=False, action='store_true', help="Used when you only want to run a"
" regular inventory and you do not want to provide a group.")
args = parser.parse_args()
if not args.password:
args.password = getpass()
try:
headers = authenticate(args.ip, args.user, args.password)
if not headers:
sys.exit(0)
if args.device_ids:
device_ids_arg = args.device_ids.split(',')
else:
device_ids_arg = None
if args.service_tags:
service_tags_arg = args.service_tags.split(',')
else:
service_tags_arg = None
if args.idrac_ips:
idrac_ips_arg = args.idrac_ips.split(',')
else:
idrac_ips_arg = None
if args.device_names:
device_names_arg = args.device_names.split(',')
else:
device_names_arg = None
print("WARNING: To reflect firmware changes you may have to power cycle the server first before running this. "
"It is situation dependent.")
if args.groupname == 'All Devices':
print("WARNING: No argument was provided for groupname. Defaulting to \'All Devices\' for the "
"inventory refresh. See help for details. This will also display if the argument was manually set "
"to \'All Devices\' and can be safely ignored. If you do not want to use a group AND you do not want"
" to update the configuration inventory tab, use the --skip-config-inventory and --ignore-group"
" switches together. If you want to use a group to update regular inventories only and not the"
" configuration inventory tab use the --skip-config-inventory switch by itself.")
refresh_device_inventory(headers, args.ip, args.groupname, args.skip_config_inventory, device_ids_arg,
service_tags_arg, idrac_ips_arg, device_names_arg, args.ignore_group)
except Exception as error:
print("Unexpected error:", str(error))
| 44.599369
| 120
| 0.609245
|
import argparse
import json
import sys
import time
from argparse import RawTextHelpFormatter
from pprint import pprint
from urllib.parse import urlparse
from getpass import getpass
try:
import urllib3
import requests
except ModuleNotFoundError:
print("This program requires urllib3 and requests. To install them on most systems run `pip install requests"
"urllib3`")
sys.exit(0)
def authenticate(ome_ip_address: str, ome_username: str, ome_password: str) -> dict:
authenticated_headers = {'content-type': 'application/json'}
session_url = 'https://%s/api/SessionService/Sessions' % ome_ip_address
user_details = {'UserName': ome_username,
'Password': ome_password,
'SessionType': 'API'}
try:
session_info = requests.post(session_url, verify=False,
data=json.dumps(user_details),
headers=authenticated_headers)
except requests.exceptions.ConnectionError:
print("Failed to connect to OME. This typically indicates a network connectivity problem. Can you ping OME?")
sys.exit(0)
if session_info.status_code == 201:
authenticated_headers['X-Auth-Token'] = session_info.headers['X-Auth-Token']
return authenticated_headers
print("There was a problem authenticating with OME. Are you sure you have the right username, password, "
"and IP?")
raise Exception("There was a problem authenticating with OME. Are you sure you have the right username, "
"password, and IP?")
def get_group_id_by_name(ome_ip_address: str, group_name: str, authenticated_headers: dict) -> int:
print("Searching for the requested group.")
groups_url = "https://%s/api/GroupService/Groups?$filter=Name eq '%s'" % (ome_ip_address, group_name)
group_response = requests.get(groups_url, headers=authenticated_headers, verify=False)
if group_response.status_code == 200:
json_data = json.loads(group_response.content)
if json_data['@odata.count'] > 1:
print("WARNING: We found more than one name that matched the group name: " + group_name +
". We are picking the first entry.")
if json_data['@odata.count'] == 1 or json_data['@odata.count'] > 1:
group_id = json_data['value'][0]['Id']
if not isinstance(group_id, int):
print("The server did not return an integer ID. Something went wrong.")
return -1
return group_id
print("Error: We could not find the group " + group_name + ". Exiting.")
return -1
print("Unable to retrieve groups. Exiting.")
return -1
def get_data(authenticated_headers: dict, url: str, odata_filter: str = None, max_pages: int = None) -> dict:
next_link_url = None
if odata_filter:
count_data = requests.get(url + '?$filter=' + odata_filter, headers=authenticated_headers, verify=False)
if count_data.status_code == 400:
print("Received an error while retrieving data from %s:" % url + '?$filter=' + odata_filter)
pprint(count_data.json()['error'])
return {}
count_data = count_data.json()
if count_data['@odata.count'] <= 0:
print("No results found!")
return {}
else:
count_data = requests.get(url, headers=authenticated_headers, verify=False).json()
if 'value' in count_data:
data = count_data['value']
else:
data = count_data
if '@odata.nextLink' in count_data:
next_link_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url)) + count_data['@odata.nextLink']
i = 1
while next_link_url is not None:
if max_pages:
if i >= max_pages:
break
else:
i = i + 1
response = requests.get(next_link_url, headers=authenticated_headers, verify=False)
next_link_url = None
if response.status_code == 200:
requested_data = response.json()
if requested_data['@odata.count'] <= 0:
print("No results found!")
return {}
if '@odata.nextLink' in requested_data:
next_link_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url)) + \
requested_data['@odata.nextLink']
if 'value' in requested_data:
data += requested_data['value']
else:
data += requested_data
else:
print("Unknown error occurred. Received HTTP response code: " + str(response.status_code) +
" with error: " + response.text)
raise Exception("Unknown error occurred. Received HTTP response code: " + str(response.status_code)
+ " with error: " + response.text)
return data
def track_job_to_completion(ome_ip_address: str,
authenticated_headers: dict,
tracked_job_id,
max_retries: int = 20,
sleep_interval: int = 30) -> bool:
job_status_map = {
"2020": "Scheduled",
"2030": "Queued",
"2040": "Starting",
"2050": "Running",
"2060": "Completed",
"2070": "Failed",
"2090": "Warning",
"2080": "New",
"2100": "Aborted",
"2101": "Paused",
"2102": "Stopped",
"2103": "Canceled"
}
failed_job_status = [2070, 2090, 2100, 2101, 2102, 2103]
job_url = 'https://%s/api/JobService/Jobs(%s)' % (ome_ip_address, tracked_job_id)
loop_ctr = 0
job_incomplete = True
print("Polling %s to completion ..." % tracked_job_id)
while loop_ctr < max_retries:
loop_ctr += 1
time.sleep(sleep_interval)
job_resp = requests.get(job_url, headers=authenticated_headers, verify=False)
if job_resp.status_code == 200:
job_status = str((job_resp.json())['LastRunStatus']['Id'])
job_status_str = job_status_map[job_status]
print("Iteration %s: Status of %s is %s" % (loop_ctr, tracked_job_id, job_status_str))
if int(job_status) == 2060:
job_incomplete = False
print("Job completed successfully!")
break
elif int(job_status) in failed_job_status:
job_incomplete = True
if job_status_str == "Warning":
print("Completed with errors")
else:
print("Error: Job failed.")
job_hist_url = str(job_url) + "/ExecutionHistories"
job_hist_resp = requests.get(job_hist_url, headers=authenticated_headers, verify=False)
if job_hist_resp.status_code == 200:
job_history_id = str((job_hist_resp.json())['value'][0]['Id'])
execution_hist_detail = "(" + job_history_id + ")/ExecutionHistoryDetails"
job_hist_det_url = str(job_hist_url) + execution_hist_detail
job_hist_det_resp = requests.get(job_hist_det_url,
headers=authenticated_headers,
verify=False)
if job_hist_det_resp.status_code == 200:
pprint(job_hist_det_resp.json()['value'])
else:
print("Unable to parse job execution history... exiting")
break
else:
print("Unable to poll status of %s - Iteration %s " % (tracked_job_id, loop_ctr))
if job_incomplete:
print("Job %s incomplete after polling %s times...Check status" % (tracked_job_id, max_retries))
return False
return True
def get_device_id(authenticated_headers: dict,
ome_ip_address: str,
service_tag: str = None,
device_idrac_ip: str = None,
device_name: str = None) -> int:
if not service_tag and not device_idrac_ip and not device_name:
print("No argument provided to get_device_id. Must provide service tag, device idrac IP or device name.")
return -1
# If the user passed a device name, resolve that name to a device ID
if device_name:
device_id = get_data(authenticated_headers, "https://%s/api/DeviceService/Devices" % ome_ip_address,
"DeviceName eq \'%s\'" % device_name)
if len(device_id) == 0:
print("Error: We were unable to find device name " + device_name + " on this OME server. Exiting.")
return -1
device_id = device_id[0]['Id']
elif service_tag:
device_id = get_data(authenticated_headers, "https://%s/api/DeviceService/Devices" % ome_ip_address,
"DeviceServiceTag eq \'%s\'" % service_tag)
if len(device_id) == 0:
print("Error: We were unable to find service tag " + service_tag + " on this OME server. Exiting.")
return -1
device_id = device_id[0]['Id']
elif device_idrac_ip:
device_id = -1
device_ids = get_data(authenticated_headers, "https://%s/api/DeviceService/Devices" % ome_ip_address,
"DeviceManagement/any(d:d/NetworkAddress eq '%s')" % device_idrac_ip)
if len(device_ids) == 0:
print("Error: We were unable to find idrac IP " + device_idrac_ip + " on this OME server. Exiting.")
return -1
# TODO - This is necessary because the filter above could possibly return multiple results
# TODO - See https://github.com/dell/OpenManage-Enterprise/issues/87
for device_id in device_ids:
if device_id['DeviceManagement'][0]['NetworkAddress'] == device_idrac_ip:
device_id = device_id['Id']
if device_id == -1:
print("Error: We were unable to find idrac IP " + device_idrac_ip + " on this OME server. Exiting.")
return -1
else:
device_id = -1
return device_id
def refresh_device_inventory(authenticated_headers: dict,
ome_ip_address: str,
group_name: str,
skip_config_inventory: bool,
device_ids: list = None,
service_tags: str = None,
device_idrac_ips: str = None,
device_names: str = None,
ignore_group: bool = False):
jobs_url = "https://%s/api/JobService/Jobs" % ome_ip_address
target_ids = []
    if service_tags:
        if isinstance(service_tags, str):
            service_tags = service_tags.split(',')
        for service_tag in service_tags:
            target = get_device_id(authenticated_headers, ome_ip_address, service_tag=service_tag)
            if target != -1:
                target_ids.append(target)
            else:
                print("Could not resolve ID for: " + service_tag)
    if device_idrac_ips:
        if isinstance(device_idrac_ips, str):
            device_idrac_ips = device_idrac_ips.split(',')
        for device_idrac_ip in device_idrac_ips:
            target = get_device_id(authenticated_headers, ome_ip_address, device_idrac_ip=device_idrac_ip)
            if target != -1:
                target_ids.append(target)
            else:
                print("Could not resolve ID for: " + device_idrac_ip)
    if device_names:
        if isinstance(device_names, str):
            device_names = device_names.split(',')
        for device_name in device_names:
            target = get_device_id(authenticated_headers, ome_ip_address, device_name=device_name)
            if target != -1:
                target_ids.append(target)
            else:
                print("Could not resolve ID for: " + device_name)
if device_ids:
for device_id in device_ids:
target_ids.append(device_id)
if not skip_config_inventory:
group_id = get_group_id_by_name(ome_ip_address, group_name, authenticated_headers)
if group_id == -1:
print("We were unable to find the ID for group name " + group_name + " ... exiting.")
sys.exit(0)
if not ignore_group:
            group_devices = get_data(authenticated_headers, "https://%s/api/GroupService/Groups(%s)/Devices" % (ome_ip_address, group_id))
            if len(group_devices) < 1:
                print("Error: There was a problem retrieving the devices for group " + group_name + ". Exiting")
sys.exit(0)
for device in group_devices:
target_ids.append(device['Id'])
targets_payload = []
for id_to_refresh in target_ids:
targets_payload.append({
"Id": id_to_refresh,
"Data": "",
"TargetType": {
"Id": 1000,
"Name": "DEVICE"
}
})
payload = {
"Id": 0,
"JobName": "Inventory refresh via the API.",
"JobDescription": "Refreshes the inventories for targeted hardware.",
"Schedule": "startnow",
"State": "Enabled",
"JobType": {
"Name": "Inventory_Task"
},
"Targets": targets_payload
}
print("Beginning standard inventory refresh...")
create_resp = requests.post(jobs_url, headers=authenticated_headers, verify=False, data=json.dumps(payload))
if create_resp.status_code == 201:
job_id_generic_refresh = json.loads(create_resp.content)["Id"]
else:
print("Error: Failed to refresh inventory. We aren't sure what went wrong.")
sys.exit(1)
if job_id_generic_refresh is None:
print("Received invalid job ID from OME for standard inventory. Exiting.")
sys.exit(1)
if not skip_config_inventory:
payload = {
"JobDescription": "Run config inventory collection task on selected devices",
"JobName": "Part 1 - API refresh config inventory",
"JobType": {"Id": 50, "Name": "Device_Config_Task"},
"Params": [{"Key": "action", "Value": "CONFIG_INVENTORY"}],
"Schedule": "startnow",
"StartTime": "",
"State": "Enabled",
"Targets": [{
"Data": "",
"Id": group_id,
"JobId": -1,
"TargetType": {"Id": 6000, "Name": "GROUP"}
}]
}
print("Beginning part 1 of 2 of the configuration inventory refresh.")
create_resp = requests.post(jobs_url, headers=authenticated_headers, verify=False, data=json.dumps(payload))
if create_resp.status_code == 201:
config_inventory_refresh_job_1 = json.loads(create_resp.content)["Id"]
else:
print("Error: Failed to refresh inventory. We aren't sure what went wrong.")
sys.exit(1)
if config_inventory_refresh_job_1 is None:
print("Received invalid job ID from OME for part 1 of configuration inventory refresh... exiting.")
sys.exit(1)
print("Waiting for part 1 of configuration inventory refresh to finish. This could take a couple of minutes.")
if track_job_to_completion(ome_ip_address, authenticated_headers, config_inventory_refresh_job_1):
print("Part 1 of configuration inventory refresh completed successfully.")
else:
print("Something went wrong. See text output above for more details.")
# ------------------------------------------------------
payload = {
"JobDescription": "Create Inventory",
"JobName": "Part 2 - API refresh config inventory",
"JobType": {"Id": 8, "Name": "Inventory_Task"},
"Params": [
{"Key": "action", "Value": "CONFIG_INVENTORY"},
{"Key": "isCollectDriverInventory", "Value": "true"}],
"Schedule": "startnow",
"StartTime": "",
"State": "Enabled",
"Targets": [{
"Data": "",
"Id": group_id,
"JobId": -1,
"TargetType": {"Id": 6000, "Name": "GROUP"}
}]
}
print("Beginning part 2 of 2 of the configuration inventory refresh")
create_resp = requests.post(jobs_url, headers=authenticated_headers, verify=False, data=json.dumps(payload))
if create_resp.status_code == 201:
config_inventory_refresh_job_2 = json.loads(create_resp.content)["Id"]
else:
print("Error: Failed to refresh inventory. We aren't sure what went wrong.")
sys.exit(1)
if config_inventory_refresh_job_2 is None:
print("Received invalid job ID from OME for part 2 of the configuration inventory refresh... exiting.")
sys.exit(1)
print("Waiting for part 2 of the configuration inventory refresh to finish. "
"This could take a couple of minutes.")
if track_job_to_completion(ome_ip_address, authenticated_headers, config_inventory_refresh_job_2):
print("Inventory refresh completed successfully.")
else:
print("Something went wrong. See text output above for more details.")
print("Tracking standard inventory to completion.")
if track_job_to_completion(ome_ip_address, authenticated_headers, job_id_generic_refresh):
print("Inventory refresh completed successfully.")
else:
print("Something went wrong. See text output above for more details.")
print("Inventory refresh complete!")
if __name__ == '__main__':
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("--ip", "-i", required=True, help="OME Appliance IP")
parser.add_argument("--user", "-u", required=False,
help="Username for the OME Appliance", default="admin")
parser.add_argument("--password", "-p", required=False,
help="Password for the OME Appliance")
parser.add_argument("--groupname", "-g", required=False, default="All Devices",
help="The name of the group containing the devices whose inventory you want to refresh. "
"Defaults to all devices. Due to the way the API functions, if you want to refresh the "
"configuration inventory, you must have all applicable devices in a group. The "
"configuration inventory is specific to the tab called \"Configuration Inventory\" under "
"a device's view. You can use the create_static_group and add_device_to_static group "
"modules to do this programmatically.")
parser.add_argument("--device-ids", "-d", help="A comma separated list of device-ids to refresh. Applies to "
"regular inventory only. This does not impact the configuration "
"inventory tab. That is controlled by the group name.")
parser.add_argument("--service-tags", "-s", help="A comma separated list of service tags to refresh. Applies to "
"regular inventory only. This does not impact the configuration "
"inventory tab. That is controlled by the group name.")
parser.add_argument("--idrac-ips", "-r", help="A comma separated list of idrac IPs to refresh. Applies to regular "
"inventory only. This does not impact the configuration inventory "
"tab. That is controlled by the group name.")
parser.add_argument("--device-names", "-n", help="A comma separated list of device names to refresh. Applies to "
"regular inventory only. This does not impact the configuration "
"inventory tab. That is controlled by the group name.")
parser.add_argument("--skip-config-inventory", "-skip", default=False, action='store_true',
help="The configuration inventory is the inventory you see specifically under the tab for a"
" specific device. In order to obtain a config inventory that server must be part of a"
" group or you have to run an inventory update against all devices which can be time "
"consuming. A regular inventory run will update things like firmware assuming that the"
" version change is reflected in idrac. A config inventory is launched in the GUI by "
"clicking \"Run inventory\" on quick links on the devices page. A regular inventory is "
"the same as clicking \"Run inventory\" on a specific device\'s page.")
parser.add_argument("--ignore-group", default=False, action='store_true', help="Used when you only want to run a"
" regular inventory and you do not want to provide a group.")
args = parser.parse_args()
if not args.password:
args.password = getpass()
try:
headers = authenticate(args.ip, args.user, args.password)
if not headers:
sys.exit(0)
if args.device_ids:
device_ids_arg = args.device_ids.split(',')
else:
device_ids_arg = None
if args.service_tags:
service_tags_arg = args.service_tags.split(',')
else:
service_tags_arg = None
if args.idrac_ips:
idrac_ips_arg = args.idrac_ips.split(',')
else:
idrac_ips_arg = None
if args.device_names:
device_names_arg = args.device_names.split(',')
else:
device_names_arg = None
print("WARNING: To reflect firmware changes you may have to power cycle the server first before running this. "
"It is situation dependent.")
if args.groupname == 'All Devices':
print("WARNING: No argument was provided for groupname. Defaulting to \'All Devices\' for the "
"inventory refresh. See help for details. This will also display if the argument was manually set "
"to \'All Devices\' and can be safely ignored. If you do not want to use a group AND you do not want"
" to update the configuration inventory tab, use the --skip-config-inventory and --ignore-group"
" switches together. If you want to use a group to update regular inventories only and not the"
" configuration inventory tab use the --skip-config-inventory switch by itself.")
refresh_device_inventory(headers, args.ip, args.groupname, args.skip_config_inventory, device_ids_arg,
service_tags_arg, idrac_ips_arg, device_names_arg, args.ignore_group)
except Exception as error:
print("Unexpected error:", str(error))
| true
| true
|
1c465889a1c778474e5db6bd5a5c7d2042d61766
| 2,091
|
py
|
Python
|
source-py/pyBKT/test/hand_specified_model.py
|
bukeplato/pyBKT
|
733a4ccf0de78bef7d47b5a6af7131c7778560db
|
[
"MIT"
] | 132
|
2018-03-22T06:04:14.000Z
|
2022-03-24T21:54:27.000Z
|
source-py/pyBKT/test/hand_specified_model.py
|
bukeplato/pyBKT
|
733a4ccf0de78bef7d47b5a6af7131c7778560db
|
[
"MIT"
] | 25
|
2018-01-10T14:00:48.000Z
|
2022-03-22T04:00:47.000Z
|
source-py/pyBKT/test/hand_specified_model.py
|
bukeplato/pyBKT
|
733a4ccf0de78bef7d47b5a6af7131c7778560db
|
[
"MIT"
] | 46
|
2017-09-12T04:30:58.000Z
|
2022-03-10T08:54:52.000Z
|
import numpy as np
from pyBKT.generate import synthetic_data
from pyBKT.generate import random_model, random_model_uni
from pyBKT.fit import EM_fit
from copy import deepcopy
from pyBKT.util import print_dot
#parameters
num_subparts = 4
num_resources = 2
num_fit_initializations = 25
observation_sequence_lengths = np.full(50, 100, dtype=int)
#generate synthetic model and data.
#model is really easy.
truemodel = {}
truemodel["As"] = np.zeros((num_resources, 2, 2), dtype=np.float_)
truemodel["As"][0, :, :] = np.transpose([[0.75, 0.25], [0.1, 0.9]])
truemodel["As"][1, :, :] = np.transpose([[0.9, 0.1], [0.1, 0.9]])
truemodel["learns"] = truemodel["As"][:, 1, 0]
truemodel["forgets"] = truemodel["As"][:, 0, 1]
truemodel["pi_0"] = np.array([[0.9], [0.1]]) #TODO: one prior per resource? does this array needs to be col?
truemodel["prior"] = 0.1
truemodel["guesses"] = np.full(num_subparts, 0.05, dtype=np.float_)
truemodel["slips"] = np.full(num_subparts, 0.25, dtype=np.float_)
truemodel["resources"] = np.random.randint(1, high = num_resources+1, size = sum(observation_sequence_lengths))
#data!
print("generating data...")
data = synthetic_data.synthetic_data(truemodel, observation_sequence_lengths)
#fit models, starting with random initializations
print('fitting! each dot is a new EM initialization')
best_likelihood = float("-inf")
for i in range(num_fit_initializations):
print_dot.print_dot(i, num_fit_initializations)
fitmodel = random_model.random_model(num_resources, num_subparts)
(fitmodel, log_likelihoods) = EM_fit.EM_fit(fitmodel, data)
if (log_likelihoods[-1] > best_likelihood):
best_likelihood = log_likelihoods[-1]
best_model = fitmodel
# compare the fit model to the true model
print('')
print('these two should look similar')
print(truemodel['As'])
print('')
print(best_model['As'])
print('')
print('these should look similar too')
print(1-truemodel['guesses'])
print('')
print(1-best_model['guesses'])
print('')
print('these should look similar too')
print(1-truemodel['slips'])
print('')
print(1-best_model['slips'])
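# Follow-up sketch (not part of the original test): instead of eyeballing the printed matrices,
# the comparison can be summarized numerically. This assumes, as above, that the fitted model
# exposes the same array-valued keys as the true model.
#
#     print('max abs error in As:', np.max(np.abs(truemodel['As'] - best_model['As'])))
#     print('max abs error in guesses:', np.max(np.abs(truemodel['guesses'] - best_model['guesses'])))
#     print('max abs error in slips:', np.max(np.abs(truemodel['slips'] - best_model['slips'])))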
| 31.681818
| 111
| 0.724055
|
import numpy as np
from pyBKT.generate import synthetic_data
from pyBKT.generate import random_model, random_model_uni
from pyBKT.fit import EM_fit
from copy import deepcopy
from pyBKT.util import print_dot
num_subparts = 4
num_resources = 2
num_fit_initializations = 25
observation_sequence_lengths = np.full(50, 100, dtype=int)
truemodel = {}
truemodel["As"] = np.zeros((num_resources, 2, 2), dtype=np.float_)
truemodel["As"][0, :, :] = np.transpose([[0.75, 0.25], [0.1, 0.9]])
truemodel["As"][1, :, :] = np.transpose([[0.9, 0.1], [0.1, 0.9]])
truemodel["learns"] = truemodel["As"][:, 1, 0]
truemodel["forgets"] = truemodel["As"][:, 0, 1]
truemodel["pi_0"] = np.array([[0.9], [0.1]])
truemodel["prior"] = 0.1
truemodel["guesses"] = np.full(num_subparts, 0.05, dtype=np.float_)
truemodel["slips"] = np.full(num_subparts, 0.25, dtype=np.float_)
truemodel["resources"] = np.random.randint(1, high = num_resources+1, size = sum(observation_sequence_lengths))
print("generating data...")
data = synthetic_data.synthetic_data(truemodel, observation_sequence_lengths)
print('fitting! each dot is a new EM initialization')
best_likelihood = float("-inf")
for i in range(num_fit_initializations):
print_dot.print_dot(i, num_fit_initializations)
fitmodel = random_model.random_model(num_resources, num_subparts)
(fitmodel, log_likelihoods) = EM_fit.EM_fit(fitmodel, data)
if (log_likelihoods[-1] > best_likelihood):
best_likelihood = log_likelihoods[-1]
best_model = fitmodel
print('')
print('these two should look similar')
print(truemodel['As'])
print('')
print(best_model['As'])
print('')
print('these should look similar too')
print(1-truemodel['guesses'])
print('')
print(1-best_model['guesses'])
print('')
print('these should look similar too')
print(1-truemodel['slips'])
print('')
print(1-best_model['slips'])
| true
| true
|
1c4658b4bb64b7f6ea6eb1dbc078b2ce403e3327
| 369
|
py
|
Python
|
Problem124.py
|
Cleancode404/ProjectEuler
|
2f93b256b107bfb6a395b8aa197cfeacc599b00b
|
[
"MIT"
] | null | null | null |
Problem124.py
|
Cleancode404/ProjectEuler
|
2f93b256b107bfb6a395b8aa197cfeacc599b00b
|
[
"MIT"
] | null | null | null |
Problem124.py
|
Cleancode404/ProjectEuler
|
2f93b256b107bfb6a395b8aa197cfeacc599b00b
|
[
"MIT"
] | null | null | null |
"""
Ordered radicals
"""
def compute(x):
limit = 100000
rads = [0] + [1]* limit
for i in range(2, len(rads)):
if rads[i] == 1:
for j in range(i, len(rads), i):
rads[j] *= i
    data = sorted((rad, i) for (i, rad) in enumerate(rads))
    return str(data[x][1])
if __name__ == "__main__":
print(compute(10000))
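# Worked mini-example (added for illustration): the sieve above multiplies each prime p into
# rads[j] for every multiple j of p, so rads[n] ends up as the product of the distinct prime
# factors of n. For instance 504 = 2^3 * 3^2 * 7, so rad(504) = 2 * 3 * 7 = 42, and sorting the
# (rad(n), n) pairs gives the ordering E(k) that the problem asks about.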
| 18.45
| 60
| 0.517615
|
def compute(x):
limit = 100000
rads = [0] + [1]* limit
for i in range(2, len(rads)):
if rads[i] == 1:
for j in range(i, len(rads), i):
rads[j] *= i
    data = sorted((rad, i) for (i, rad) in enumerate(rads))
    return str(data[x][1])
if __name__ == "__main__":
print(compute(10000))
| true
| true
|
1c4659f51ad3a120a0b93c0284ea7b59b39d919d
| 537
|
py
|
Python
|
setup.py
|
sw5cc/tencent-finance
|
08da6a75904055a6113a01c86377b613cbe07033
|
[
"MIT"
] | null | null | null |
setup.py
|
sw5cc/tencent-finance
|
08da6a75904055a6113a01c86377b613cbe07033
|
[
"MIT"
] | null | null | null |
setup.py
|
sw5cc/tencent-finance
|
08da6a75904055a6113a01c86377b613cbe07033
|
[
"MIT"
] | null | null | null |
from setuptools import setup
VERSION = '1.0.0'
REPO = 'https://github.com/sw5cc/tencent-finance'
setup(
name='tencent-finance',
py_modules=['tencent_finance'],
version=VERSION,
description='Python library that provides APIs to query finance from http://stock.qq.com',
author='sw5cc',
author_email='sw5cc.125pflops@gmail.com',
license='MIT',
url=REPO,
download_url='{0}/archive/{1}.tar.gz'.format(REPO, VERSION),
keywords=['tencent', 'finance'],
install_requires=['requests', 'simplejson']
)
| 28.263158
| 94
| 0.683426
|
from setuptools import setup
VERSION = '1.0.0'
REPO = 'https://github.com/sw5cc/tencent-finance'
setup(
name='tencent-finance',
py_modules=['tencent_finance'],
version=VERSION,
description='Python library that provides APIs to query finance from http://stock.qq.com',
author='sw5cc',
author_email='sw5cc.125pflops@gmail.com',
license='MIT',
url=REPO,
download_url='{0}/archive/{1}.tar.gz'.format(REPO, VERSION),
keywords=['tencent', 'finance'],
install_requires=['requests', 'simplejson']
)
| true
| true
|
1c465c0941cce89c8fc109d641fe9e2f109a55e6
| 1,071
|
py
|
Python
|
python/time_test.py
|
ysoftman/test_code
|
4c71cc7c6a17d73cc84298e3a44051d3ab9d40f8
|
[
"MIT"
] | 3
|
2017-12-07T04:29:36.000Z
|
2022-01-11T10:58:14.000Z
|
python/time_test.py
|
ysoftman/test_code
|
4c71cc7c6a17d73cc84298e3a44051d3ab9d40f8
|
[
"MIT"
] | 14
|
2018-07-17T05:16:42.000Z
|
2022-03-22T00:43:47.000Z
|
python/time_test.py
|
ysoftman/test_code
|
4c71cc7c6a17d73cc84298e3a44051d3ab9d40f8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# author: ysoftman
# python version : 3.x
# desc : time test
import time
import datetime
if __name__ == '__main__':
# epoch time
print(time.time())
# suspend for 1 sec
time.sleep(1)
    # process time in seconds (time.clock() was removed in Python 3.8, so use perf_counter)
    start = time.perf_counter()
    # return the current time as a struct_time
print(time.localtime())
print(time.localtime().tm_year)
print(time.localtime().tm_mon)
print(time.localtime().tm_mday)
print(time.localtime().tm_hour)
print(time.localtime().tm_min)
print(time.localtime().tm_sec)
day_of_week = {
0: "monday",
1: "tuesday",
2: "wednesday",
3: "thursday",
4: "friday",
5: "saturday",
6: "sunday",
}
    # day of week (Monday: 0 ~ Sunday: 6)
wday = time.localtime().tm_wday
print(wday, '->', day_of_week.get(wday))
end = time.perf_counter()
print('elapsed time : ', end - start, 'sec')
    # date from the current timestamp
print(datetime.date.fromtimestamp(time.time()))
    # show the date 10 days from now
td = datetime.timedelta(days=10)
print(datetime.date.today() + td)
| 21.42
| 51
| 0.582633
|
import time
import datetime
if __name__ == '__main__':
print(time.time())
time.sleep(1)
    start = time.perf_counter()
print(time.localtime())
print(time.localtime().tm_year)
print(time.localtime().tm_mon)
print(time.localtime().tm_mday)
print(time.localtime().tm_hour)
print(time.localtime().tm_min)
print(time.localtime().tm_sec)
day_of_week = {
0: "monday",
1: "tuesday",
2: "wednesday",
3: "thursday",
4: "friday",
5: "saturday",
6: "sunday",
}
wday = time.localtime().tm_wday
print(wday, '->', day_of_week.get(wday))
end = time.perf_counter()
print('elapsed time : ', end - start, 'sec')
print(datetime.date.fromtimestamp(time.time()))
td = datetime.timedelta(days=10)
print(datetime.date.today() + td)
| true
| true
|
1c465c6a86486509dd27a24054b97bb891f2c729
| 1,867
|
py
|
Python
|
tests/components/folder/test_sensor.py
|
twrecked/core
|
d3ae8a938cdea9b6e0d443c91c37ac3dbbd459ab
|
[
"Apache-2.0"
] | 7
|
2019-02-07T14:14:12.000Z
|
2019-07-28T06:56:10.000Z
|
tests/components/folder/test_sensor.py
|
twrecked/core
|
d3ae8a938cdea9b6e0d443c91c37ac3dbbd459ab
|
[
"Apache-2.0"
] | 6
|
2021-02-08T20:54:31.000Z
|
2022-03-12T00:50:43.000Z
|
tests/components/folder/test_sensor.py
|
klauern/home-assistant-core
|
c18ba6aec0627e6afb6442c678edb5ff2bb17db6
|
[
"Apache-2.0"
] | 2
|
2020-04-19T13:35:24.000Z
|
2020-04-19T13:35:51.000Z
|
"""The tests for the folder sensor."""
import os
import unittest
from homeassistant.components.folder.sensor import CONF_FOLDER_PATHS
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant
CWD = os.path.join(os.path.dirname(__file__))
TEST_FOLDER = "test_folder"
TEST_DIR = os.path.join(CWD, TEST_FOLDER)
TEST_TXT = "mock_test_folder.txt"
TEST_FILE = os.path.join(TEST_DIR, TEST_TXT)
def create_file(path):
"""Create a test file."""
with open(path, "w") as test_file:
test_file.write("test")
class TestFolderSensor(unittest.TestCase):
"""Test the filesize sensor."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
if not os.path.isdir(TEST_DIR):
os.mkdir(TEST_DIR)
self.hass.config.whitelist_external_dirs = {TEST_DIR}
def teardown_method(self, method):
"""Stop everything that was started."""
if os.path.isfile(TEST_FILE):
os.remove(TEST_FILE)
os.rmdir(TEST_DIR)
self.hass.stop()
def test_invalid_path(self):
"""Test that an invalid path is caught."""
config = {"sensor": {"platform": "folder", CONF_FOLDER_PATHS: "invalid_path"}}
assert setup_component(self.hass, "sensor", config)
assert len(self.hass.states.entity_ids()) == 0
def test_valid_path(self):
"""Test for a valid path."""
create_file(TEST_FILE)
config = {"sensor": {"platform": "folder", CONF_FOLDER_PATHS: TEST_DIR}}
assert setup_component(self.hass, "sensor", config)
assert len(self.hass.states.entity_ids()) == 1
state = self.hass.states.get("sensor.test_folder")
assert state.state == "0.0"
assert state.attributes.get("number_of_files") == 1
| 33.945455
| 86
| 0.666845
|
import os
import unittest
from homeassistant.components.folder.sensor import CONF_FOLDER_PATHS
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant
CWD = os.path.join(os.path.dirname(__file__))
TEST_FOLDER = "test_folder"
TEST_DIR = os.path.join(CWD, TEST_FOLDER)
TEST_TXT = "mock_test_folder.txt"
TEST_FILE = os.path.join(TEST_DIR, TEST_TXT)
def create_file(path):
with open(path, "w") as test_file:
test_file.write("test")
class TestFolderSensor(unittest.TestCase):
def setup_method(self, method):
self.hass = get_test_home_assistant()
if not os.path.isdir(TEST_DIR):
os.mkdir(TEST_DIR)
self.hass.config.whitelist_external_dirs = {TEST_DIR}
def teardown_method(self, method):
if os.path.isfile(TEST_FILE):
os.remove(TEST_FILE)
os.rmdir(TEST_DIR)
self.hass.stop()
def test_invalid_path(self):
config = {"sensor": {"platform": "folder", CONF_FOLDER_PATHS: "invalid_path"}}
assert setup_component(self.hass, "sensor", config)
assert len(self.hass.states.entity_ids()) == 0
def test_valid_path(self):
create_file(TEST_FILE)
config = {"sensor": {"platform": "folder", CONF_FOLDER_PATHS: TEST_DIR}}
assert setup_component(self.hass, "sensor", config)
assert len(self.hass.states.entity_ids()) == 1
state = self.hass.states.get("sensor.test_folder")
assert state.state == "0.0"
assert state.attributes.get("number_of_files") == 1
| true
| true
|
1c465d2bf7cc3b2557d4537d22985e65be65189e
| 6,600
|
py
|
Python
|
utils/models/mobilenet_v2.py
|
voldemortX/DeeplabV3_PyTorch1.3_Codebase
|
d22d23e74800fafb58eeb61d6649008745c1a287
|
[
"BSD-3-Clause"
] | 1
|
2020-09-17T06:21:39.000Z
|
2020-09-17T06:21:39.000Z
|
utils/models/mobilenet_v2.py
|
voldemortX/pytorch-segmentation
|
9c62c0a721d11c8ea6bf312ecf1c7b238a54dcda
|
[
"BSD-3-Clause"
] | null | null | null |
utils/models/mobilenet_v2.py
|
voldemortX/pytorch-segmentation
|
9c62c0a721d11c8ea6bf312ecf1c7b238a54dcda
|
[
"BSD-3-Clause"
] | null | null | null |
# Modified from mmsegmentation code, referenced from torchvision
import torch.nn as nn
from .builder import MODELS
from ._utils import make_divisible
from .common_models import InvertedResidual
from .utils import load_state_dict_from_url
@MODELS.register()
class MobileNetV2Encoder(nn.Module):
"""MobileNetV2 backbone (up to second-to-last feature map).
This backbone is the implementation of
`MobileNetV2: Inverted Residuals and Linear Bottlenecks
<https://arxiv.org/abs/1801.04381>`_.
Args:
widen_factor (float): Width multiplier, multiply number of
channels in each layer by this amount. Default: 1.0.
strides (Sequence[int], optional): Strides of the first block of each
layer. If not specified, default config in ``arch_setting`` will
be used.
dilations (Sequence[int]): Dilation of each layer.
out_indices (None or Sequence[int]): Output from which stages.
Default: (7, ).
frozen_stages (int): Stages to be frozen (all param fixed).
Default: -1, which means not freezing any parameters.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
pretrained (str, optional): model pretrained path. Default: None
out_stride (int): the output stride of the output feature map
"""
# Parameters to build layers. 3 parameters are needed to construct a
# layer, from left to right: expand_ratio, channel, num_blocks.
arch_settings = [[1, 16, 1], [6, 24, 2], [6, 32, 3], [6, 64, 4],
[6, 96, 3], [6, 160, 3], [6, 320, 1]]
def __init__(self, widen_factor=1., strides=(1, 2, 2, 2, 1, 2, 1), dilations=(1, 1, 1, 1, 1, 1, 1),
out_indices=(1, 2, 4, 6), frozen_stages=-1, norm_eval=False, pretrained=None,
progress=True, out_stride=0):
super(MobileNetV2Encoder, self).__init__()
self.pretrained = pretrained
self.widen_factor = widen_factor
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == len(self.arch_settings)
self.out_indices = out_indices
for index in out_indices:
if index not in range(0, 7):
                raise ValueError(f'the item in out_indices must be in range(0, 7). But received {index}')
        if frozen_stages not in range(-1, 7):
            raise ValueError(f'frozen_stages must be in range(-1, 7). But received {frozen_stages}')
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.norm_eval = norm_eval
self.out_stride = out_stride
self.in_channels = make_divisible(32 * widen_factor, 8)
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=self.in_channels, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(self.in_channels),
nn.ReLU6()
)
self.layers = []
for i, layer_cfg in enumerate(self.arch_settings):
expand_ratio, channel, num_blocks = layer_cfg
stride = self.strides[i]
dilation = self.dilations[i]
out_channels = make_divisible(channel * widen_factor, 8)
inverted_res_layer = self.make_layer(
out_channels=out_channels,
num_blocks=num_blocks,
stride=stride,
dilation=dilation,
expand_ratio=expand_ratio)
layer_name = f'layer{i + 1}'
self.add_module(layer_name, inverted_res_layer)
self.layers.append(layer_name)
if self.pretrained is None:
self.weight_initialization()
else:
self.load_pretrained(progress=progress)
def load_pretrained(self, progress):
state_dict = load_state_dict_from_url(self.pretrained, progress=progress)
self_state_dict = self.state_dict()
self_keys = list(self_state_dict.keys())
for i, (_, v) in enumerate(state_dict.items()):
if i > len(self_keys) - 1:
break
self_state_dict[self_keys[i]] = v
self.load_state_dict(self_state_dict)
def weight_initialization(self):
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out")
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def make_layer(self, out_channels, num_blocks, stride, dilation,
expand_ratio):
"""Stack InvertedResidual blocks to build a layer for MobileNetV2.
Args:
out_channels (int): out_channels of block.
num_blocks (int): Number of blocks.
stride (int): Stride of the first block.
dilation (int): Dilation of the first block.
expand_ratio (int): Expand the number of channels of the
hidden layer in InvertedResidual by this ratio.
"""
layers = []
for i in range(num_blocks):
layers.append(
InvertedResidual(
self.in_channels,
out_channels,
stride if i == 0 else 1,
expand_ratio=expand_ratio,
dilation=dilation if i == 0 else 1)
)
self.in_channels = out_channels
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
else:
return tuple(outs)
def _freeze_stages(self):
if self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
layer = getattr(self, f'layer{i}')
layer.eval()
for param in layer.parameters():
param.requires_grad = False
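# Minimal usage sketch (an illustrative assumption, not part of the original
# file): it presumes the class above is importable together with its helpers
# (make_divisible, InvertedResidual, load_state_dict_from_url) from the same
# package, and that torch is installed.
def _example_mobilenetv2_encoder():
    import torch
    encoder = MobileNetV2Encoder(widen_factor=1.0, out_indices=(1, 2, 4, 6))
    dummy = torch.randn(1, 3, 224, 224)   # NCHW input batch
    feats = encoder(dummy)                # tuple with one feature map per out index
    return [tuple(f.shape) for f in feats]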
| 42.038217
| 116
| 0.594697
|
import torch.nn as nn
from .builder import MODELS
from ._utils import make_divisible
from .common_models import InvertedResidual
from .utils import load_state_dict_from_url
@MODELS.register()
class MobileNetV2Encoder(nn.Module):
arch_settings = [[1, 16, 1], [6, 24, 2], [6, 32, 3], [6, 64, 4],
[6, 96, 3], [6, 160, 3], [6, 320, 1]]
def __init__(self, widen_factor=1., strides=(1, 2, 2, 2, 1, 2, 1), dilations=(1, 1, 1, 1, 1, 1, 1),
out_indices=(1, 2, 4, 6), frozen_stages=-1, norm_eval=False, pretrained=None,
progress=True, out_stride=0):
super(MobileNetV2Encoder, self).__init__()
self.pretrained = pretrained
self.widen_factor = widen_factor
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == len(self.arch_settings)
self.out_indices = out_indices
for index in out_indices:
if index not in range(0, 7):
                raise ValueError(f'each item in out_indices must be in range(0, 7). But received {index}')
if frozen_stages not in range(-1, 7):
            raise ValueError(f'frozen_stages must be in range(-1, 7). But received {frozen_stages}')
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.norm_eval = norm_eval
self.out_stride = out_stride
self.in_channels = make_divisible(32 * widen_factor, 8)
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=self.in_channels, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(self.in_channels),
nn.ReLU6()
)
self.layers = []
for i, layer_cfg in enumerate(self.arch_settings):
expand_ratio, channel, num_blocks = layer_cfg
stride = self.strides[i]
dilation = self.dilations[i]
out_channels = make_divisible(channel * widen_factor, 8)
inverted_res_layer = self.make_layer(
out_channels=out_channels,
num_blocks=num_blocks,
stride=stride,
dilation=dilation,
expand_ratio=expand_ratio)
layer_name = f'layer{i + 1}'
self.add_module(layer_name, inverted_res_layer)
self.layers.append(layer_name)
if self.pretrained is None:
self.weight_initialization()
else:
self.load_pretrained(progress=progress)
def load_pretrained(self, progress):
state_dict = load_state_dict_from_url(self.pretrained, progress=progress)
self_state_dict = self.state_dict()
self_keys = list(self_state_dict.keys())
for i, (_, v) in enumerate(state_dict.items()):
if i > len(self_keys) - 1:
break
self_state_dict[self_keys[i]] = v
self.load_state_dict(self_state_dict)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out")
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def make_layer(self, out_channels, num_blocks, stride, dilation,
expand_ratio):
layers = []
for i in range(num_blocks):
layers.append(
InvertedResidual(
self.in_channels,
out_channels,
stride if i == 0 else 1,
expand_ratio=expand_ratio,
dilation=dilation if i == 0 else 1)
)
self.in_channels = out_channels
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
else:
return tuple(outs)
def _freeze_stages(self):
if self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
layer = getattr(self, f'layer{i}')
layer.eval()
for param in layer.parameters():
param.requires_grad = False
| true
| true
|
1c465d43539d78553af3d947b0be4daa8319c479
| 20,345
|
py
|
Python
|
tests/python/unittest/test_higher_order_grad.py
|
HaoLiuHust/incubator-mxnet
|
0deb50b33f29a19bbe4bdc6ff14658afc5000d50
|
[
"Apache-2.0"
] | 1
|
2019-02-22T13:53:48.000Z
|
2019-02-22T13:53:48.000Z
|
tests/python/unittest/test_higher_order_grad.py
|
HaoLiuHust/incubator-mxnet
|
0deb50b33f29a19bbe4bdc6ff14658afc5000d50
|
[
"Apache-2.0"
] | 1
|
2020-08-27T06:39:07.000Z
|
2020-08-31T03:29:27.000Z
|
tests/python/unittest/test_higher_order_grad.py
|
HaoLiuHust/incubator-mxnet
|
0deb50b33f29a19bbe4bdc6ff14658afc5000d50
|
[
"Apache-2.0"
] | 1
|
2020-08-14T22:56:19.000Z
|
2020-08-14T22:56:19.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import math
import random
from functools import reduce
from operator import mul
import random
from common import with_seed, xfail_when_nonstandard_decimal_separator
import mxnet
from mxnet import nd, autograd, gluon
from mxnet.test_utils import (
assert_almost_equal, random_arrays, random_uniform_arrays, rand_shape_nd, same)
@with_seed()
def test_sin():
def sin(x):
return nd.sin(x)
def grad_grad_op(x):
return -nd.sin(x)
def grad_grad_grad_op(x):
return -nd.cos(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, sin, grad_grad_op)
# TODO(kshitij12345): Remove
check_nth_order_unary(array, sin,
[grad_grad_op, grad_grad_grad_op], [2, 3])
@with_seed()
def test_cos():
def cos(x):
return nd.cos(x)
def grad_grad_op(x):
return -nd.cos(x)
def grad_grad_grad_op(x):
return nd.sin(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, cos, grad_grad_op)
# TODO(kshitij12345): Remove
check_nth_order_unary(array, cos,
[grad_grad_op, grad_grad_grad_op], [2, 3])
@with_seed()
def test_tan():
def tan(x):
return nd.tan(x)
def grad_op(x):
return 1 / nd.cos(x)**2
def grad_grad_op(x):
return 2 * tan(x) * grad_op(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, tan, grad_grad_op)
@with_seed()
def test_sinh():
def sinh(x):
return nd.sinh(x)
def grad_grad_op(x):
return sinh(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, sinh, grad_grad_op)
@with_seed()
def test_cosh():
def cosh(x):
return nd.cosh(x)
def grad_grad_op(x):
return cosh(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, cosh, grad_grad_op)
@with_seed()
def test_tanh():
def tanh(x):
return nd.tanh(x)
def grad_op(x):
return 1 - tanh(x)**2
def grad_grad_op(x):
return -2 * tanh(x) * grad_op(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_nth_order_unary(array, tanh, grad_op, 1, rtol=1e-6, atol=1e-6)
check_second_order_unary(
array, tanh, grad_grad_op, rtol=1e-6, atol=1e-5)
@with_seed()
def test_arcsin():
def arcsin(x):
return nd.arcsin(x)
def grad_grad_op(x):
return x / nd.sqrt((1-x**2)**3)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
# Domain of arcsin is [-1, 1]
array = random_uniform_arrays(shape, low=-0.99, high=0.99)[0]
check_second_order_unary(array, arcsin, grad_grad_op)
@with_seed()
def test_arccos():
def arccos(x):
return nd.arccos(x)
def grad_grad_op(x):
return -x / nd.sqrt((1-x**2)**3)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
# Domain of arccos is [-1, 1]
array = random_uniform_arrays(shape, low=-0.99, high=0.99)[0]
check_second_order_unary(array, arccos, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_arctan():
def arctan(x):
return nd.arctan(x)
def grad_grad_op(x):
return (-2 * x)/((1 + x**2)**2)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
# Domain of arctan is all real numbers.
# Scale std_dev
array *= random.randint(500, 10000)
check_second_order_unary(array, arctan, grad_grad_op)
@with_seed()
def test_arcsinh():
def arcsinh(x):
return nd.arcsinh(x)
def grad_grad_op(x):
return x/nd.sqrt((nd.square(x)+1)**3)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, arcsinh, grad_grad_op)
@with_seed()
def test_arccosh():
def arccosh(x):
return nd.arccosh(x)
def grad_grad_op(x):
return x/(nd.sqrt(x-1) * nd.sqrt(x+1) * (x+1) * (x-1))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = array * sigma + mu
# Domain of arccosh 1 to infinity.
assert((array > 1).all())
check_second_order_unary(array, arccosh, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_arctanh():
def arctanh(x):
return nd.arctanh(x)
def grad_grad_op(x):
return (2 * x)/((1 - x**2)**2)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
# Domain of arctanh is (-1, 1)
array = random_uniform_arrays(shape, low=-0.99, high=0.99)[0]
check_second_order_unary(array, arctanh, grad_grad_op)
@with_seed()
def test_radians():
def radians(x):
return nd.radians(x)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, radians, grad_grad_op)
@with_seed()
def test_relu():
def relu(x):
return nd.relu(x)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, relu, grad_grad_op)
@with_seed()
def test_log():
def log(x):
return nd.log(x)
def grad_op(x):
return 1/x
def grad_grad_op(x):
return -1/(x**2)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, log, grad_grad_op)
# TODO(kshitij12345): Remove
check_nth_order_unary(array, log, [grad_op, grad_grad_op], [1, 2])
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_log2():
def log2(x):
return nd.log2(x)
def grad_grad_op(x):
return -1/((x**2) * math.log(2))
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, log2, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_log10():
def log10(x):
return nd.log10(x)
def grad_grad_op(x):
return -1/((x**2) * math.log(10))
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, log10, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_square():
def grad_grad_op(x):
return nd.ones_like(x) * 2
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, nd.square, grad_grad_op)
@with_seed()
def test_expm1():
def grad_grad_op(x):
return nd.exp(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, nd.expm1, grad_grad_op)
@with_seed()
def test_log1p():
def grad_grad_op(x):
return -1/((1+x)**2)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, nd.log1p, grad_grad_op)
@with_seed()
def test_reciprocal():
def reciprocal(x):
return nd.reciprocal(x)
def grad_grad_op(x):
return 2 / x**3
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, reciprocal, grad_grad_op)
@with_seed()
def test_abs():
def abs(x):
return nd.abs(x)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, abs, grad_grad_op)
@with_seed()
def test_clip():
def clip(x):
a_min, a_max = sorted([random.random(), random.random()])
return nd.clip(x, a_min, a_max)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, clip, grad_grad_op)
@with_seed()
def test_dropout():
def dropout(x):
return nd.Dropout(x)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, dropout, grad_grad_op)
@with_seed()
def test_sigmoid():
def sigmoid(x):
return nd.sigmoid(x)
def grad_op(x):
return sigmoid(x) * (1 - sigmoid(x))
def grad_grad_op(x):
return grad_op(x) * (1 - 2 * sigmoid(x))
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, sigmoid, grad_grad_op)
# TODO(kshitij12345): Remove
check_nth_order_unary(array, sigmoid, [grad_op, grad_grad_op], [1, 2])
check_nth_order_unary(array, sigmoid, grad_grad_op, 2)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_sqrt():
def sqrt(x):
return nd.sqrt(x)
def grad_grad_op(x):
return -1/(4 * sqrt(x**3))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = sigma * array + mu
# Only positive numbers
assert((array > 0).all())
check_second_order_unary(array, sqrt, grad_grad_op)
@with_seed()
def test_cbrt():
def cbrt(x):
return nd.cbrt(x)
def grad_grad_op(x):
return -2/(9 * cbrt(x**5))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = sigma * array + mu
# Only positive numbers
assert((array > 0).all())
check_second_order_unary(array, cbrt, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_rsqrt():
def rsqrt(x):
return nd.rsqrt(x)
def grad_grad_op(x):
return 3/(4 * nd.sqrt(x**5))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = sigma * array + mu
# Only positive numbers
assert((array > 0).all())
check_second_order_unary(array, rsqrt, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_rcbrt():
def rcbrt(x):
return nd.rcbrt(x)
def grad_grad_op(x):
return 4/(9 * nd.cbrt(x**7))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = sigma * array + mu
# Only positive numbers
assert((array > 0).all())
check_second_order_unary(array, rcbrt, grad_grad_op)
def check_second_order_unary(x, op, grad_grad_op, rtol=None, atol=None):
check_nth_order_unary(x, op, grad_grad_op, 2, rtol, atol)
def check_nth_order_unary(x, op, grad_ops, orders, rtol=None, atol=None):
"""Assert n-th order autograd gradient against expected gradient.
    Multiple orders of gradients can be checked by passing a list of
    functions computing the particular order gradients together with the
    corresponding list of orders.
Note
----
1. Orders should always be monotonically increasing.
    2. Elements of grad_ops should correspond to elements of orders,
       i.e. grad_ops = [grad_op, grad_grad_grad_op] should be passed with
       orders = [1, 3]
Parameters
----------
x : mxnet.NDArray
Input Array.
op : Callable
Operation to perform on Input Array.
grad_ops : Callable or List of Callable
Function to compute and assert gradient of given order.
orders : int or List of int
        Order(s) for which expected and computed gradients are asserted.
Returns
-------
None
"""
if isinstance(orders, int):
orders = [orders]
grad_ops = [grad_ops]
assert all(i < j for i, j in zip(orders[0:-1], orders[1:])), \
"orders should be monotonically increasing"
assert len(set(orders)) == len(orders), \
"orders should have unique elements"
highest_order = max(orders)
x = nd.array(x)
x.attach_grad()
expected_grads = [grad_op(x) for grad_op in grad_ops]
computed_grads = []
head_grads = []
# Perform compute.
with autograd.record():
y = op(x)
for current_order in range(1, highest_order+1):
head_grad = nd.random.normal(shape=x.shape)
y = autograd.grad(heads=y, variables=x, head_grads=head_grad,
create_graph=True, retain_graph=True)[0]
if current_order in orders:
computed_grads.append(y)
head_grads.append(head_grad)
# Validate all the gradients.
for order, grad, computed_grad in \
zip(orders, expected_grads, computed_grads):
# Compute expected values.
expected_grad = grad.asnumpy()
for head_grad in head_grads[:order]:
expected_grad *= head_grad.asnumpy()
assert_almost_equal(
expected_grad, computed_grad.asnumpy(), rtol=rtol, atol=atol)
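# Illustrative call (a hedged example, not part of the original test suite):
# check the 1st and 2nd order gradients of sin, whose derivatives are cos and
# -sin, mirroring how the tests above use the helper.
def _example_check_nth_order_sin():
    array = random_arrays(rand_shape_nd(2))
    check_nth_order_unary(array, nd.sin,
                          [lambda x: nd.cos(x), lambda x: -nd.sin(x)], [1, 2])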
def arange_shape_like(y):
shape = y.shape
nelems = reduce(mul, shape)
x = nd.arange(nelems).reshape(shape)
return x
class NDArrayGenerator(object):
def __init__(self, dim, startdim=1):
self.dim = dim
self.curdim = startdim
def __iter__(self):
return self
@staticmethod
def gen(dimensions):
shape = rand_shape_nd(dimensions, 4)
nelems = reduce(mul, shape)
x = nd.arange(nelems).reshape(shape)
return x
def next(self):
return self.__next__()
def __next__(self):
if self.curdim > self.dim:
raise StopIteration
x = NDArrayGenerator.gen(self.curdim)
self.curdim += 1
return x
def flatten2d_right(x):
s_0 = x.shape[0]
s_1 = reduce(mul, x.shape[1:])
return x.reshape((s_0, s_1))
def flatten2d_left(x):
s_0 = reduce(mul, x.shape[:-1])
s_1 = x.shape[-1]
return x.reshape((s_0, s_1))
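# Shape illustration for the two flatten helpers above (a hedged example, not
# part of the original tests): a (2, 3, 4) array becomes (2, 12) with
# flatten2d_right and (6, 4) with flatten2d_left.
def _example_flatten_shapes():
    x = nd.arange(24).reshape((2, 3, 4))
    assert flatten2d_right(x).shape == (2, 12)
    assert flatten2d_left(x).shape == (6, 4)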
@with_seed()
def test_dense_backward_flatten():
print("2nd order gradient for Fully Connected, flatten=True")
for x in NDArrayGenerator(4,2):
hidden = random.randrange(1, 4)
net = gluon.nn.Sequential()
with net.name_scope():
net.add(gluon.nn.Dense(hidden, flatten=True))
net.initialize(mxnet.initializer.Constant(.5))
x.attach_grad()
with autograd.record():
y = net.forward(x)
o_y = arange_shape_like(y) # head gradient of y
params = [p.data() for p in net.collect_params().values()]
w = params[0]
b = params[1]
print("Checking y ({}) = x({}) * w^T({}) + b({})".format(y.shape, x.shape, w.shape, b.shape))
x_grad = autograd.grad(heads=y, variables=x, head_grads=o_y,
create_graph=True, retain_graph=True)[0]
o_x_grad = arange_shape_like(x_grad)
w_grad_grad = autograd.grad(heads=x_grad, variables=w,
head_grads=o_x_grad, create_graph=False)[0]
w_grad = autograd.grad(heads=y, variables=w, head_grads=o_y,
create_graph=True, retain_graph=True)[0]
o_w_grad = arange_shape_like(w_grad)
x_grad_grad = autograd.grad(heads=w_grad, variables=x,
head_grads=o_w_grad, create_graph=False)[0]
# Expected results
w_grad_e = nd.dot(o_y, x, transpose_a=True)
w_grad_grad_e = nd.dot(o_y, o_x_grad, transpose_a=True)
x_grad_e = nd.dot(o_y, w)
x_grad_grad_e = nd.dot(o_y, o_w_grad)
assert w_grad.shape == w.shape
assert w_grad_grad.shape == w.shape
assert x_grad.shape == x.shape
assert x_grad_grad.shape == x.shape
w_grad_check = same(flatten2d_right(w_grad), flatten2d_right(w_grad_e))
w_grad_grad_check = same(flatten2d_right(w_grad_grad), flatten2d_right(w_grad_grad_e))
x_grad_check = same(flatten2d_right(x_grad), flatten2d_right(x_grad_e))
x_grad_grad_check = same(flatten2d_right(x_grad_grad), flatten2d_right(x_grad_grad_e))
assert x_grad_check
assert w_grad_check
assert x_grad_grad_check
assert w_grad_grad_check
@with_seed()
def test_dense_backward_no_flatten():
print("2nd order gradient for Fully Connected, flatten=False")
for x in NDArrayGenerator(5,3):
hidden = random.randrange(1, 4)
net = gluon.nn.Sequential()
with net.name_scope():
net.add(gluon.nn.Dense(hidden, flatten=False))
net.initialize(mxnet.initializer.Constant(.5))
x.attach_grad()
with autograd.record():
y = net.forward(x)
o_y = arange_shape_like(y) # head gradient of y
params = [p.data() for p in net.collect_params().values()]
w = params[0]
b = params[1]
print("Checking y ({}) = x({}) * w^T({}) + b({})".format(y.shape, x.shape, w.shape, b.shape))
x_grad = autograd.grad(heads=y, variables=x, head_grads=o_y,
create_graph=True, retain_graph=True)[0]
o_x_grad = arange_shape_like(x_grad)
w_grad_grad = autograd.grad(heads=x_grad, variables=w,
head_grads=o_x_grad, create_graph=False)[0]
w_grad = autograd.grad(heads=y, variables=w, head_grads=o_y,
create_graph=True, retain_graph=True)[0]
o_w_grad = arange_shape_like(w_grad)
x_grad_grad = autograd.grad(heads=w_grad, variables=x,
head_grads=o_w_grad, create_graph=False)[0]
# Expected results
o_y = flatten2d_left(o_y)
x = flatten2d_left(x)
o_x_grad = flatten2d_left(o_x_grad)
o_w_grad = flatten2d_left(o_w_grad)
w_grad_e = nd.dot(o_y, x, transpose_a=True)
w_grad_grad_e = nd.dot(o_y, o_x_grad, transpose_a=True)
x_grad_e = nd.dot(o_y, w)
x_grad_grad_e = nd.dot(o_y, o_w_grad)
w_grad_check = same(flatten2d_left(w_grad), flatten2d_left(w_grad_e))
w_grad_grad_check = same(flatten2d_left(w_grad_grad), flatten2d_left(w_grad_grad_e))
x_grad_check = same(flatten2d_left(x_grad), flatten2d_left(x_grad_e))
x_grad_grad_check = same(flatten2d_left(x_grad_grad), flatten2d_left(x_grad_grad_e))
assert x_grad_check
assert w_grad_check
assert x_grad_grad_check
assert w_grad_grad_check
| 28.454545
| 105
| 0.621627
|
import math
import random
from functools import reduce
from operator import mul
import random
from common import with_seed, xfail_when_nonstandard_decimal_separator
import mxnet
from mxnet import nd, autograd, gluon
from mxnet.test_utils import (
assert_almost_equal, random_arrays, random_uniform_arrays, rand_shape_nd, same)
@with_seed()
def test_sin():
def sin(x):
return nd.sin(x)
def grad_grad_op(x):
return -nd.sin(x)
def grad_grad_grad_op(x):
return -nd.cos(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, sin, grad_grad_op)
check_nth_order_unary(array, sin,
[grad_grad_op, grad_grad_grad_op], [2, 3])
@with_seed()
def test_cos():
def cos(x):
return nd.cos(x)
def grad_grad_op(x):
return -nd.cos(x)
def grad_grad_grad_op(x):
return nd.sin(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, cos, grad_grad_op)
check_nth_order_unary(array, cos,
[grad_grad_op, grad_grad_grad_op], [2, 3])
@with_seed()
def test_tan():
def tan(x):
return nd.tan(x)
def grad_op(x):
return 1 / nd.cos(x)**2
def grad_grad_op(x):
return 2 * tan(x) * grad_op(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, tan, grad_grad_op)
@with_seed()
def test_sinh():
def sinh(x):
return nd.sinh(x)
def grad_grad_op(x):
return sinh(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, sinh, grad_grad_op)
@with_seed()
def test_cosh():
def cosh(x):
return nd.cosh(x)
def grad_grad_op(x):
return cosh(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, cosh, grad_grad_op)
@with_seed()
def test_tanh():
def tanh(x):
return nd.tanh(x)
def grad_op(x):
return 1 - tanh(x)**2
def grad_grad_op(x):
return -2 * tanh(x) * grad_op(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_nth_order_unary(array, tanh, grad_op, 1, rtol=1e-6, atol=1e-6)
check_second_order_unary(
array, tanh, grad_grad_op, rtol=1e-6, atol=1e-5)
@with_seed()
def test_arcsin():
def arcsin(x):
return nd.arcsin(x)
def grad_grad_op(x):
return x / nd.sqrt((1-x**2)**3)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_uniform_arrays(shape, low=-0.99, high=0.99)[0]
check_second_order_unary(array, arcsin, grad_grad_op)
@with_seed()
def test_arccos():
def arccos(x):
return nd.arccos(x)
def grad_grad_op(x):
return -x / nd.sqrt((1-x**2)**3)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_uniform_arrays(shape, low=-0.99, high=0.99)[0]
check_second_order_unary(array, arccos, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_arctan():
def arctan(x):
return nd.arctan(x)
def grad_grad_op(x):
return (-2 * x)/((1 + x**2)**2)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array *= random.randint(500, 10000)
check_second_order_unary(array, arctan, grad_grad_op)
@with_seed()
def test_arcsinh():
def arcsinh(x):
return nd.arcsinh(x)
def grad_grad_op(x):
return x/nd.sqrt((nd.square(x)+1)**3)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, arcsinh, grad_grad_op)
@with_seed()
def test_arccosh():
def arccosh(x):
return nd.arccosh(x)
def grad_grad_op(x):
return x/(nd.sqrt(x-1) * nd.sqrt(x+1) * (x+1) * (x-1))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = array * sigma + mu
assert((array > 1).all())
check_second_order_unary(array, arccosh, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_arctanh():
def arctanh(x):
return nd.arctanh(x)
def grad_grad_op(x):
return (2 * x)/((1 - x**2)**2)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_uniform_arrays(shape, low=-0.99, high=0.99)[0]
check_second_order_unary(array, arctanh, grad_grad_op)
@with_seed()
def test_radians():
def radians(x):
return nd.radians(x)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, radians, grad_grad_op)
@with_seed()
def test_relu():
def relu(x):
return nd.relu(x)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, relu, grad_grad_op)
@with_seed()
def test_log():
def log(x):
return nd.log(x)
def grad_op(x):
return 1/x
def grad_grad_op(x):
return -1/(x**2)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, log, grad_grad_op)
check_nth_order_unary(array, log, [grad_op, grad_grad_op], [1, 2])
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_log2():
def log2(x):
return nd.log2(x)
def grad_grad_op(x):
return -1/((x**2) * math.log(2))
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, log2, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_log10():
def log10(x):
return nd.log10(x)
def grad_grad_op(x):
return -1/((x**2) * math.log(10))
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, log10, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_square():
def grad_grad_op(x):
return nd.ones_like(x) * 2
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, nd.square, grad_grad_op)
@with_seed()
def test_expm1():
def grad_grad_op(x):
return nd.exp(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, nd.expm1, grad_grad_op)
@with_seed()
def test_log1p():
def grad_grad_op(x):
return -1/((1+x)**2)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, nd.log1p, grad_grad_op)
@with_seed()
def test_reciprocal():
def reciprocal(x):
return nd.reciprocal(x)
def grad_grad_op(x):
return 2 / x**3
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, reciprocal, grad_grad_op)
@with_seed()
def test_abs():
def abs(x):
return nd.abs(x)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, abs, grad_grad_op)
@with_seed()
def test_clip():
def clip(x):
a_min, a_max = sorted([random.random(), random.random()])
return nd.clip(x, a_min, a_max)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, clip, grad_grad_op)
@with_seed()
def test_dropout():
def dropout(x):
return nd.Dropout(x)
def grad_grad_op(x):
return nd.zeros_like(x)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, dropout, grad_grad_op)
@with_seed()
def test_sigmoid():
def sigmoid(x):
return nd.sigmoid(x)
def grad_op(x):
return sigmoid(x) * (1 - sigmoid(x))
def grad_grad_op(x):
return grad_op(x) * (1 - 2 * sigmoid(x))
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
check_second_order_unary(array, sigmoid, grad_grad_op)
check_nth_order_unary(array, sigmoid, [grad_op, grad_grad_op], [1, 2])
check_nth_order_unary(array, sigmoid, grad_grad_op, 2)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_sqrt():
def sqrt(x):
return nd.sqrt(x)
def grad_grad_op(x):
return -1/(4 * sqrt(x**3))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = sigma * array + mu
assert((array > 0).all())
check_second_order_unary(array, sqrt, grad_grad_op)
@with_seed()
def test_cbrt():
def cbrt(x):
return nd.cbrt(x)
def grad_grad_op(x):
return -2/(9 * cbrt(x**5))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = sigma * array + mu
assert((array > 0).all())
check_second_order_unary(array, cbrt, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_rsqrt():
def rsqrt(x):
return nd.rsqrt(x)
def grad_grad_op(x):
return 3/(4 * nd.sqrt(x**5))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = sigma * array + mu
assert((array > 0).all())
check_second_order_unary(array, rsqrt, grad_grad_op)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_rcbrt():
def rcbrt(x):
return nd.rcbrt(x)
def grad_grad_op(x):
return 4/(9 * nd.cbrt(x**7))
sigma = random.randint(25, 100)
mu = random.randint(500, 1000)
for dim in range(1, 5):
shape = rand_shape_nd(dim)
array = random_arrays(shape)
array = sigma * array + mu
assert((array > 0).all())
check_second_order_unary(array, rcbrt, grad_grad_op)
def check_second_order_unary(x, op, grad_grad_op, rtol=None, atol=None):
check_nth_order_unary(x, op, grad_grad_op, 2, rtol, atol)
def check_nth_order_unary(x, op, grad_ops, orders, rtol=None, atol=None):
if isinstance(orders, int):
orders = [orders]
grad_ops = [grad_ops]
assert all(i < j for i, j in zip(orders[0:-1], orders[1:])), \
"orders should be monotonically increasing"
assert len(set(orders)) == len(orders), \
"orders should have unique elements"
highest_order = max(orders)
x = nd.array(x)
x.attach_grad()
expected_grads = [grad_op(x) for grad_op in grad_ops]
computed_grads = []
head_grads = []
with autograd.record():
y = op(x)
for current_order in range(1, highest_order+1):
head_grad = nd.random.normal(shape=x.shape)
y = autograd.grad(heads=y, variables=x, head_grads=head_grad,
create_graph=True, retain_graph=True)[0]
if current_order in orders:
computed_grads.append(y)
head_grads.append(head_grad)
for order, grad, computed_grad in \
zip(orders, expected_grads, computed_grads):
expected_grad = grad.asnumpy()
for head_grad in head_grads[:order]:
expected_grad *= head_grad.asnumpy()
assert_almost_equal(
expected_grad, computed_grad.asnumpy(), rtol=rtol, atol=atol)
def arange_shape_like(y):
shape = y.shape
nelems = reduce(mul, shape)
x = nd.arange(nelems).reshape(shape)
return x
class NDArrayGenerator(object):
def __init__(self, dim, startdim=1):
self.dim = dim
self.curdim = startdim
def __iter__(self):
return self
@staticmethod
def gen(dimensions):
shape = rand_shape_nd(dimensions, 4)
nelems = reduce(mul, shape)
x = nd.arange(nelems).reshape(shape)
return x
def next(self):
return self.__next__()
def __next__(self):
if self.curdim > self.dim:
raise StopIteration
x = NDArrayGenerator.gen(self.curdim)
self.curdim += 1
return x
def flatten2d_right(x):
s_0 = x.shape[0]
s_1 = reduce(mul, x.shape[1:])
return x.reshape((s_0, s_1))
def flatten2d_left(x):
s_0 = reduce(mul, x.shape[:-1])
s_1 = x.shape[-1]
return x.reshape((s_0, s_1))
@with_seed()
def test_dense_backward_flatten():
print("2nd order gradient for Fully Connected, flatten=True")
for x in NDArrayGenerator(4,2):
hidden = random.randrange(1, 4)
net = gluon.nn.Sequential()
with net.name_scope():
net.add(gluon.nn.Dense(hidden, flatten=True))
net.initialize(mxnet.initializer.Constant(.5))
x.attach_grad()
with autograd.record():
y = net.forward(x)
o_y = arange_shape_like(y)
params = [p.data() for p in net.collect_params().values()]
w = params[0]
b = params[1]
print("Checking y ({}) = x({}) * w^T({}) + b({})".format(y.shape, x.shape, w.shape, b.shape))
x_grad = autograd.grad(heads=y, variables=x, head_grads=o_y,
create_graph=True, retain_graph=True)[0]
o_x_grad = arange_shape_like(x_grad)
w_grad_grad = autograd.grad(heads=x_grad, variables=w,
head_grads=o_x_grad, create_graph=False)[0]
w_grad = autograd.grad(heads=y, variables=w, head_grads=o_y,
create_graph=True, retain_graph=True)[0]
o_w_grad = arange_shape_like(w_grad)
x_grad_grad = autograd.grad(heads=w_grad, variables=x,
head_grads=o_w_grad, create_graph=False)[0]
w_grad_e = nd.dot(o_y, x, transpose_a=True)
w_grad_grad_e = nd.dot(o_y, o_x_grad, transpose_a=True)
x_grad_e = nd.dot(o_y, w)
x_grad_grad_e = nd.dot(o_y, o_w_grad)
assert w_grad.shape == w.shape
assert w_grad_grad.shape == w.shape
assert x_grad.shape == x.shape
assert x_grad_grad.shape == x.shape
w_grad_check = same(flatten2d_right(w_grad), flatten2d_right(w_grad_e))
w_grad_grad_check = same(flatten2d_right(w_grad_grad), flatten2d_right(w_grad_grad_e))
x_grad_check = same(flatten2d_right(x_grad), flatten2d_right(x_grad_e))
x_grad_grad_check = same(flatten2d_right(x_grad_grad), flatten2d_right(x_grad_grad_e))
assert x_grad_check
assert w_grad_check
assert x_grad_grad_check
assert w_grad_grad_check
@with_seed()
def test_dense_backward_no_flatten():
print("2nd order gradient for Fully Connected, flatten=False")
for x in NDArrayGenerator(5,3):
hidden = random.randrange(1, 4)
net = gluon.nn.Sequential()
with net.name_scope():
net.add(gluon.nn.Dense(hidden, flatten=False))
net.initialize(mxnet.initializer.Constant(.5))
x.attach_grad()
with autograd.record():
y = net.forward(x)
o_y = arange_shape_like(y)
params = [p.data() for p in net.collect_params().values()]
w = params[0]
b = params[1]
print("Checking y ({}) = x({}) * w^T({}) + b({})".format(y.shape, x.shape, w.shape, b.shape))
x_grad = autograd.grad(heads=y, variables=x, head_grads=o_y,
create_graph=True, retain_graph=True)[0]
o_x_grad = arange_shape_like(x_grad)
w_grad_grad = autograd.grad(heads=x_grad, variables=w,
head_grads=o_x_grad, create_graph=False)[0]
w_grad = autograd.grad(heads=y, variables=w, head_grads=o_y,
create_graph=True, retain_graph=True)[0]
o_w_grad = arange_shape_like(w_grad)
x_grad_grad = autograd.grad(heads=w_grad, variables=x,
head_grads=o_w_grad, create_graph=False)[0]
o_y = flatten2d_left(o_y)
x = flatten2d_left(x)
o_x_grad = flatten2d_left(o_x_grad)
o_w_grad = flatten2d_left(o_w_grad)
w_grad_e = nd.dot(o_y, x, transpose_a=True)
w_grad_grad_e = nd.dot(o_y, o_x_grad, transpose_a=True)
x_grad_e = nd.dot(o_y, w)
x_grad_grad_e = nd.dot(o_y, o_w_grad)
w_grad_check = same(flatten2d_left(w_grad), flatten2d_left(w_grad_e))
w_grad_grad_check = same(flatten2d_left(w_grad_grad), flatten2d_left(w_grad_grad_e))
x_grad_check = same(flatten2d_left(x_grad), flatten2d_left(x_grad_e))
x_grad_grad_check = same(flatten2d_left(x_grad_grad), flatten2d_left(x_grad_grad_e))
assert x_grad_check
assert w_grad_check
assert x_grad_grad_check
assert w_grad_grad_check
| true
| true
|
1c465dd88414760419bb1ffb6b9b757ef5581d36
| 627
|
py
|
Python
|
runs/seq-nobro-iter03000.cfg.py
|
janpawellek/broeval
|
57e31aa6e354d0bba88103b44910483e8d982d00
|
[
"MIT"
] | null | null | null |
runs/seq-nobro-iter03000.cfg.py
|
janpawellek/broeval
|
57e31aa6e354d0bba88103b44910483e8d982d00
|
[
"MIT"
] | null | null | null |
runs/seq-nobro-iter03000.cfg.py
|
janpawellek/broeval
|
57e31aa6e354d0bba88103b44910483e8d982d00
|
[
"MIT"
] | null | null | null |
# Write results to this file
OUTFILE = 'runs/seq-nobro-iter03000.result.csv'
# Source computers for the requests
SOURCE = ['10.0.0.1']
# Should Bro be enabled on the source machines?
SOURCE_BRO = [False]
# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']
# Should Bro be enabled on the target machines?
TARGET_BRO = [False]
# Connection mode (par = parallel, seq = sequential)
MODE = 'seq'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repetition
ITER = 3000
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 5
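# Hypothetical loader sketch (an assumption, not part of the original config):
# one way an evaluation harness could read these settings programmatically.
if __name__ == '__main__':
    import runpy
    cfg = runpy.run_path('runs/seq-nobro-iter03000.cfg.py')  # module globals as a dict
    print(cfg['OUTFILE'], cfg['MODE'], cfg['ITER'])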
| 21.62069
| 68
| 0.722488
|
OUTFILE = 'runs/seq-nobro-iter03000.result.csv'
SOURCE = ['10.0.0.1']
SOURCE_BRO = [False]
TARGET = ['10.0.0.2']
TARGET_BRO = [False]
MODE = 'seq'
EPOCHS = 100
ITER = 3000
SIZE = 5
| true
| true
|
1c465eea594f4a857f85aba181b0c6af1aa42352
| 5,672
|
py
|
Python
|
EvaluateAccuracy.py
|
sagieppel/Classification-of-the-material-given-region-of-an-image-using-a-convolutional-neural-net-with-attent
|
2c78f069d4f4d9be7197b5bff6df39fc239270e4
|
[
"MIT"
] | 5
|
2021-01-21T05:04:33.000Z
|
2021-12-19T09:49:35.000Z
|
EvaluateAccuracy.py
|
sagieppel/Classification-of-the-material-given-region-of-an-image-using-a-convolutional-neural-net-with-attent
|
2c78f069d4f4d9be7197b5bff6df39fc239270e4
|
[
"MIT"
] | 2
|
2019-11-13T17:35:41.000Z
|
2021-06-04T21:40:57.000Z
|
EvaluateAccuracy.py
|
sagieppel/Classification-of-the-material-given-region-of-an-image-using-a-convolutional-neural-net-with-attent
|
2c78f069d4f4d9be7197b5bff6df39fc239270e4
|
[
"MIT"
] | 1
|
2021-12-19T09:49:29.000Z
|
2021-12-19T09:49:29.000Z
|
# Evaluate precision of image classification in a given image region
# Instructions:
# a) Set the folder of images in ImageDir
# b) Set the folder of ground-truth annotations in AnnotationDir
#    The label maps should be saved as png images with the same name as the corresponding image and a png ending. The value of each pixel corresponds to its class
# c) Set the number of classes in NumClasses
# d) Set the path to the trained model weights in Trained_model_path
# e) Run the script
##########################################################################################################################################################################
import Reader as Reader
import torch
import numpy as np
import AttentionNet as Net
#...........................................Input Parameters.................................................
UseCuda=True
ImageDir="ExampleData/TrainVal_Set/Images/"
AnnotationDir="ExampleData/TrainVal_Set/Annotations/"
Trained_model_path="logs/WeightRegionMaterialClassificationOpenSurface.torch" # If you want to start from a pretrained model
EvaluationFile=Trained_model_path.replace(".torch","Eval.xls")
NumClasses=44 # Number of classes; if -1, read the number of classes from the reader
BackgroundClass=0 # Marking for background/unknown class that will be ignored
#----------------------------------------Create reader for data set--------------------------------------------------------------------------------------------------------------
Reader = Reader.Reader(ImageDir=ImageDir, AnnotationDir=AnnotationDir,NumClasses=NumClasses,BackgroundClass=BackgroundClass)
if NumClasses==-1: NumClasses = Reader.NumClass+1
#---------------------Load and initiate the neural net------------------------------------------------------------------------------------
Net=Net.Net(NumClasses=NumClasses,UseGPU=UseCuda)
Net.AddAttententionLayer()
Net.load_state_dict(torch.load(Trained_model_path))
if UseCuda: Net.cuda()
Net.eval()
#==============================Region size ranges in pixels=============================================================================================
Sizes=[1000,2000,4000,8000,16000,32000,64000,128000,256000,500000,1000000] # region sizes in pixels
NumSizes=len(Sizes)
#--------------------Evaluate net accuracy---------------------------------------------------------------------------------
TP=np.zeros([Reader.NumClass+1],dtype=np.float64) # True positive per class
FP=np.zeros([Reader.NumClass+1],dtype=np.float64) # False positive per class
FN=np.zeros([Reader.NumClass+1],dtype=np.float64) # False Negative per class
SumPred=np.zeros([Reader.NumClass+1],dtype=np.float64)
SzTP=np.zeros([Reader.NumClass+1,NumSizes],dtype=np.float64) # True positive per class per size
SzFP=np.zeros([Reader.NumClass+1,NumSizes],dtype=np.float64) # False positive per class per size
SzFN=np.zeros([Reader.NumClass+1,NumSizes],dtype=np.float64) # False Negative per class per size
SzSumPred=np.zeros([Reader.NumClass+1,NumSizes],dtype=np.float64)
# Counters of how often segments of each class appear
uu=0
while (Reader.ImageN<len(Reader.FileList)):
# for i,sz in enumerate(Sizes):
Images, SegmentMask, Labels, LabelsOneHot = Reader.ReadNextImageClean()
uu+=1
print(uu)
BatchSize = Images.shape[0]
for i in range(BatchSize):
        #.........................Use net to make prediction.........................................
Prob, Lb = Net.forward(Images[i:i+1], ROI=SegmentMask[i:i+1],EvalMode=True) # Run net inference and get prediction
PredLb = Lb.data.cpu().numpy()
#.................................Evaluate accuracy per size range......................................................
LbSize=SegmentMask[i].sum()
SzInd=-1
for f,sz in enumerate(Sizes): # Find size range of the ROI region
if LbSize<sz:
SzInd=f
break
if PredLb[0] == Labels[i]:
# print("Correct")
TP[Labels[i]] += 1
SzTP[Labels[i],SzInd] += 1
else:
# print("Wrong")
FN[Labels[i]] += 1
FP[PredLb[0]] += 1
SzFN[Labels[i],SzInd] += 1
SzFP[PredLb[0],SzInd] += 1
SumPred[Labels[i]] += 1
SzSumPred[Labels[i],SzInd] += 1
#==============================Write to file=======================================================================
f = open(EvaluationFile, "w")
NrmF=len(SumPred)/(np.sum(SumPred>0)) # Normalization factor that excludes classes with zero occurrences from the mean
txt="Mean Accuracy All Class Average =\t"+ str((TP/(SumPred+0.00000001)).mean()*NrmF*100)+"%"+"\r\n"
print(txt)
f.write(txt)
txt="Mean Accuracy Images =\t"+ str((TP.mean()/SumPred.mean())*100)+"%"+"\r\n"
print(txt)
f.write(txt)
print("\r\n=============================================================================\r\n")
print(txt)
f.write(txt)
txt="SizeMax\tMeanClasses\tMeanGlobal\tNum Instances\tNumValidClasses\r\n"
print(txt)
f.write(txt)
for i,sz in enumerate(Sizes):
if SzSumPred[:,i].sum()==0: continue
NumValidClass=np.sum(SzSumPred[:, i] > 0)
NrmF = len(SzSumPred[:,i]) / NumValidClass # Normalization factor for classes with zero occurrences
txt=str(sz)+"\t"+str((SzTP[:,i]/(SzSumPred[:,i]+0.00001)).mean()*NrmF*100)+"%\t"+str(100*(SzTP[:,i]).mean()/(SzSumPred[:,i].mean()))+"%\t"+str(SzSumPred[:,i].sum())+"\t"+str(NumValidClass)+"\r\n"
print(txt)
f.write(txt)
f.close()
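# Hedged numerical illustration (not part of the original script): the two
# headline metrics written above differ when classes are imbalanced.
def _example_metric_difference():
    tp = np.array([3.0, 1.0])        # correct predictions per class
    total = np.array([4.0, 2.0])     # ground-truth instances per class
    per_class_mean = (tp / total).mean() * 100        # (0.75 + 0.5) / 2 -> 62.5
    instance_mean = (tp.mean() / total.mean()) * 100  # 4 / 6 -> ~66.7
    return per_class_mean, instance_mean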
| 50.19469
| 199
| 0.542666
| true
| true
|
|
1c465fea1d1ceec23b4315681cacca75310c7202
| 27,098
|
py
|
Python
|
numpy/core/tests/test_casting_unittests.py
|
HanumanJat8698/numpy
|
cbec2c8054ea6150490b9e72eb051848b79344d1
|
[
"BSD-3-Clause"
] | 1
|
2022-02-26T03:35:36.000Z
|
2022-02-26T03:35:36.000Z
|
numpy/core/tests/test_casting_unittests.py
|
HanumanJat8698/numpy
|
cbec2c8054ea6150490b9e72eb051848b79344d1
|
[
"BSD-3-Clause"
] | null | null | null |
numpy/core/tests/test_casting_unittests.py
|
HanumanJat8698/numpy
|
cbec2c8054ea6150490b9e72eb051848b79344d1
|
[
"BSD-3-Clause"
] | null | null | null |
"""
The tests exercise the casting machinery in a more low-level manner.
The reason is mostly to test a new implementation of the casting machinery.
Unlike most tests in NumPy, these are closer to unit tests than
integration tests.
"""
import pytest
import textwrap
import enum
import itertools
import random
import numpy as np
from numpy.lib.stride_tricks import as_strided
from numpy.testing import assert_array_equal
from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
# simple_dtypes skips object, parametric and long double dtypes (unsupported by the struct module)
simple_dtypes = "?bhilqBHILQefdFD"
if np.dtype("l").itemsize != np.dtype("q").itemsize:
# Remove l and L, the table was generated with 64bit linux in mind.
simple_dtypes = simple_dtypes.replace("l", "").replace("L", "")
simple_dtypes = [type(np.dtype(c)) for c in simple_dtypes]
def simple_dtype_instances():
for dtype_class in simple_dtypes:
dt = dtype_class()
yield pytest.param(dt, id=str(dt))
if dt.byteorder != "|":
dt = dt.newbyteorder()
yield pytest.param(dt, id=str(dt))
def get_expected_stringlength(dtype):
"""Returns the string length when casting the basic dtypes to strings.
"""
if dtype == np.bool_:
return 5
if dtype.kind in "iu":
if dtype.itemsize == 1:
length = 3
elif dtype.itemsize == 2:
length = 5
elif dtype.itemsize == 4:
length = 10
elif dtype.itemsize == 8:
length = 20
else:
raise AssertionError(f"did not find expected length for {dtype}")
if dtype.kind == "i":
length += 1 # adds one character for the sign
return length
# Note: Can't do dtype comparison for longdouble on windows
if dtype.char == "g":
return 48
elif dtype.char == "G":
return 48 * 2
elif dtype.kind == "f":
return 32 # also for half apparently.
elif dtype.kind == "c":
return 32 * 2
raise AssertionError(f"did not find expected length for {dtype}")
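# Hedged illustration (not part of the original file): a few concrete lengths
# produced by the helper above, e.g. int32 needs up to 11 characters
# ("-2147483648"), uint8 needs 3 ("255") and bool needs 5 ("False").
def _example_expected_stringlength():
    assert get_expected_stringlength(np.dtype(np.int32)) == 11
    assert get_expected_stringlength(np.dtype(np.uint8)) == 3
    assert get_expected_stringlength(np.dtype(np.bool_)) == 5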
class Casting(enum.IntEnum):
no = 0
equiv = 1
safe = 2
same_kind = 3
unsafe = 4
cast_is_view = 1 << 16
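# Hedged note (not part of the original file): cast_is_view is a bit flag that
# gets OR-ed onto a base casting level, so masking it off recovers the plain level.
def _example_cast_is_view_flag():
    combined = Casting.no | Casting.cast_is_view
    assert (combined & ~Casting.cast_is_view) == Casting.no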
def _get_cancast_table():
table = textwrap.dedent("""
X ? b h i l q B H I L Q e f d g F D G S U V O M m
? # = = = = = = = = = = = = = = = = = = = = = . =
b . # = = = = . . . . . = = = = = = = = = = = . =
h . ~ # = = = . . . . . ~ = = = = = = = = = = . =
i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . =
l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
B . ~ = = = = # = = = = = = = = = = = = = = = . =
H . ~ ~ = = = ~ # = = = ~ = = = = = = = = = = . =
I . ~ ~ ~ = = ~ ~ # = = ~ ~ = = ~ = = = = = = . =
L . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
Q . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
e . . . . . . . . . . . # = = = = = = = = = = . .
f . . . . . . . . . . . ~ # = = = = = = = = = . .
d . . . . . . . . . . . ~ ~ # = ~ = = = = = = . .
g . . . . . . . . . . . ~ ~ ~ # ~ ~ = = = = = . .
F . . . . . . . . . . . . . . . # = = = = = = . .
D . . . . . . . . . . . . . . . ~ # = = = = = . .
G . . . . . . . . . . . . . . . ~ ~ # = = = = . .
S . . . . . . . . . . . . . . . . . . # = = = . .
U . . . . . . . . . . . . . . . . . . . # = = . .
V . . . . . . . . . . . . . . . . . . . . # = . .
O . . . . . . . . . . . . . . . . . . . . = # . .
M . . . . . . . . . . . . . . . . . . . . = = # .
m . . . . . . . . . . . . . . . . . . . . = = . #
""").strip().split("\n")
dtypes = [type(np.dtype(c)) for c in table[0][2::2]]
convert_cast = {".": Casting.unsafe, "~": Casting.same_kind,
"=": Casting.safe, "#": Casting.equiv,
" ": -1}
cancast = {}
for from_dt, row in zip(dtypes, table[1:]):
cancast[from_dt] = {}
for to_dt, c in zip(dtypes, row[2::2]):
cancast[from_dt][to_dt] = convert_cast[c]
return cancast
CAST_TABLE = _get_cancast_table()
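# Hedged illustration (not part of the original tests): CAST_TABLE maps a pair
# of DType classes to the minimal casting level from the table above, e.g.
# int8 -> float32 is "safe" while float64 -> int32 is only "unsafe".
def _example_cast_table_lookup():
    assert CAST_TABLE[type(np.dtype("b"))][type(np.dtype("f"))] == Casting.safe
    assert CAST_TABLE[type(np.dtype("d"))][type(np.dtype("i"))] == Casting.unsafe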
class TestChanges:
"""
    These test cases exercise some behaviour changes
"""
@pytest.mark.parametrize("string", ["S", "U"])
@pytest.mark.parametrize("floating", ["e", "f", "d", "g"])
def test_float_to_string(self, floating, string):
assert np.can_cast(floating, string)
        # 100 is long enough to hold any formatted floating-point value
assert np.can_cast(floating, f"{string}100")
def test_to_void(self):
# But in general, we do consider these safe:
assert np.can_cast("d", "V")
assert np.can_cast("S20", "V")
        # Do not consider it a safe cast if the void is too small:
assert not np.can_cast("d", "V1")
assert not np.can_cast("S20", "V1")
assert not np.can_cast("U1", "V1")
# Structured to unstructured is just like any other:
assert np.can_cast("d,i", "V", casting="same_kind")
# Unstructured void to unstructured is actually no cast at all:
assert np.can_cast("V3", "V", casting="no")
assert np.can_cast("V0", "V", casting="no")
class TestCasting:
size = 1500 # Best larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize
def get_data(self, dtype1, dtype2):
if dtype2 is None or dtype1.itemsize >= dtype2.itemsize:
length = self.size // dtype1.itemsize
else:
length = self.size // dtype2.itemsize
# Assume that the base array is well enough aligned for all inputs.
arr1 = np.empty(length, dtype=dtype1)
assert arr1.flags.c_contiguous
assert arr1.flags.aligned
values = [random.randrange(-128, 128) for _ in range(length)]
for i, value in enumerate(values):
# Use item assignment to ensure this is not using casting:
arr1[i] = value
if dtype2 is None:
if dtype1.char == "?":
values = [bool(v) for v in values]
return arr1, values
if dtype2.char == "?":
values = [bool(v) for v in values]
arr2 = np.empty(length, dtype=dtype2)
assert arr2.flags.c_contiguous
assert arr2.flags.aligned
for i, value in enumerate(values):
# Use item assignment to ensure this is not using casting:
arr2[i] = value
return arr1, arr2, values
def get_data_variation(self, arr1, arr2, aligned=True, contig=True):
"""
Returns a copy of arr1 that may be non-contiguous or unaligned, and a
matching array for arr2 (although not a copy).
"""
if contig:
stride1 = arr1.dtype.itemsize
stride2 = arr2.dtype.itemsize
elif aligned:
stride1 = 2 * arr1.dtype.itemsize
stride2 = 2 * arr2.dtype.itemsize
else:
stride1 = arr1.dtype.itemsize + 1
stride2 = arr2.dtype.itemsize + 1
max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1
max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1
from_bytes = np.zeros(max_size1, dtype=np.uint8)
to_bytes = np.zeros(max_size2, dtype=np.uint8)
# Sanity check that the above is large enough:
assert stride1 * len(arr1) <= from_bytes.nbytes
assert stride2 * len(arr2) <= to_bytes.nbytes
if aligned:
new1 = as_strided(from_bytes[:-1].view(arr1.dtype),
arr1.shape, (stride1,))
new2 = as_strided(to_bytes[:-1].view(arr2.dtype),
arr2.shape, (stride2,))
else:
new1 = as_strided(from_bytes[1:].view(arr1.dtype),
arr1.shape, (stride1,))
new2 = as_strided(to_bytes[1:].view(arr2.dtype),
arr2.shape, (stride2,))
new1[...] = arr1
if not contig:
# Ensure we did not overwrite bytes that should not be written:
offset = arr1.dtype.itemsize if aligned else 0
buf = from_bytes[offset::stride1].tobytes()
assert buf.count(b"\0") == len(buf)
if contig:
assert new1.flags.c_contiguous
assert new2.flags.c_contiguous
else:
assert not new1.flags.c_contiguous
assert not new2.flags.c_contiguous
if aligned:
assert new1.flags.aligned
assert new2.flags.aligned
else:
assert not new1.flags.aligned or new1.dtype.alignment == 1
assert not new2.flags.aligned or new2.dtype.alignment == 1
return new1, new2
@pytest.mark.parametrize("from_Dt", simple_dtypes)
def test_simple_cancast(self, from_Dt):
for to_Dt in simple_dtypes:
cast = get_castingimpl(from_Dt, to_Dt)
for from_dt in [from_Dt(), from_Dt().newbyteorder()]:
default = cast._resolve_descriptors((from_dt, None))[1][1]
assert default == to_Dt()
del default
for to_dt in [to_Dt(), to_Dt().newbyteorder()]:
casting, (from_res, to_res) = cast._resolve_descriptors(
(from_dt, to_dt))
assert(type(from_res) == from_Dt)
assert(type(to_res) == to_Dt)
if casting & Casting.cast_is_view:
# If a view is acceptable, this is "no" casting
# and byte order must be matching.
assert casting == Casting.no | Casting.cast_is_view
# The above table lists this as "equivalent"
assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt]
# Note that to_res may not be the same as from_dt
assert from_res.isnative == to_res.isnative
else:
if from_Dt == to_Dt:
# Note that to_res may not be the same as from_dt
assert from_res.isnative != to_res.isnative
assert casting == CAST_TABLE[from_Dt][to_Dt]
if from_Dt is to_Dt:
assert(from_dt is from_res)
assert(to_dt is to_res)
@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
@pytest.mark.parametrize("from_dt", simple_dtype_instances())
def test_simple_direct_casts(self, from_dt):
"""
This test checks numeric direct casts for dtypes supported also by the
        struct module (plus complex). It tries to test a wide range of
inputs, but skips over possibly undefined behaviour (e.g. int rollover).
Longdouble and CLongdouble are tested, but only using double precision.
If this test creates issues, it should possibly just be simplified
or even removed (checking whether unaligned/non-contiguous casts give
the same results is useful, though).
"""
for to_dt in simple_dtype_instances():
to_dt = to_dt.values[0]
cast = get_castingimpl(type(from_dt), type(to_dt))
casting, (from_res, to_res) = cast._resolve_descriptors(
(from_dt, to_dt))
if from_res is not from_dt or to_res is not to_dt:
# Do not test this case, it is handled in multiple steps,
                # each of which is tested individually.
return
safe = (casting & ~Casting.cast_is_view) <= Casting.safe
del from_res, to_res, casting
arr1, arr2, values = self.get_data(from_dt, to_dt)
cast._simple_strided_call((arr1, arr2))
# Check via python list
assert arr2.tolist() == values
# Check that the same results are achieved for strided loops
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
# Check if alignment makes a difference, but only if supported
# and only if the alignment can be wrong
if ((from_dt.alignment == 1 and to_dt.alignment == 1) or
not cast._supports_unaligned):
return
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
del arr1_o, arr2_o, cast
@pytest.mark.parametrize("from_Dt", simple_dtypes)
def test_numeric_to_times(self, from_Dt):
# We currently only implement contiguous loops, so only need to
# test those.
from_dt = from_Dt()
time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"),
np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")]
for time_dt in time_dtypes:
cast = get_castingimpl(type(from_dt), type(time_dt))
casting, (from_res, to_res) = cast._resolve_descriptors(
(from_dt, time_dt))
assert from_res is from_dt
assert to_res is time_dt
del from_res, to_res
assert(casting & CAST_TABLE[from_Dt][type(time_dt)])
int64_dt = np.dtype(np.int64)
arr1, arr2, values = self.get_data(from_dt, int64_dt)
arr2 = arr2.view(time_dt)
arr2[...] = np.datetime64("NaT")
if time_dt == np.dtype("M8"):
# This is a bit of a strange path, and could probably be removed
arr1[-1] = 0 # ensure at least one value is not NaT
# The cast currently succeeds, but the values are invalid:
cast._simple_strided_call((arr1, arr2))
with pytest.raises(ValueError):
str(arr2[-1]) # e.g. conversion to string fails
return
cast._simple_strided_call((arr1, arr2))
assert [int(v) for v in arr2.tolist()] == values
# Check that the same results are achieved for strided loops
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
@pytest.mark.parametrize(
["from_dt", "to_dt", "expected_casting", "nom", "denom"],
[("M8[ns]", None,
Casting.no | Casting.cast_is_view, 1, 1),
(str(np.dtype("M8[ns]").newbyteorder()), None, Casting.equiv, 1, 1),
("M8", "M8[ms]", Casting.safe | Casting.cast_is_view, 1, 1),
("M8[ms]", "M8", Casting.unsafe, 1, 1), # should be invalid cast
("M8[5ms]", "M8[5ms]", Casting.no | Casting.cast_is_view, 1, 1),
("M8[ns]", "M8[ms]", Casting.same_kind, 1, 10**6),
("M8[ms]", "M8[ns]", Casting.safe, 10**6, 1),
("M8[ms]", "M8[7ms]", Casting.same_kind, 1, 7),
("M8[4D]", "M8[1M]", Casting.same_kind, None,
# give full values based on NumPy 1.19.x
[-2**63, 0, -1, 1314, -1315, 564442610]),
("m8[ns]", None, Casting.no | Casting.cast_is_view, 1, 1),
(str(np.dtype("m8[ns]").newbyteorder()), None, Casting.equiv, 1, 1),
("m8", "m8[ms]", Casting.safe | Casting.cast_is_view, 1, 1),
("m8[ms]", "m8", Casting.unsafe, 1, 1), # should be invalid cast
("m8[5ms]", "m8[5ms]", Casting.no | Casting.cast_is_view, 1, 1),
("m8[ns]", "m8[ms]", Casting.same_kind, 1, 10**6),
("m8[ms]", "m8[ns]", Casting.safe, 10**6, 1),
("m8[ms]", "m8[7ms]", Casting.same_kind, 1, 7),
("m8[4D]", "m8[1M]", Casting.unsafe, None,
# give full values based on NumPy 1.19.x
[-2**63, 0, 0, 1314, -1315, 564442610])])
def test_time_to_time(self, from_dt, to_dt, expected_casting, nom, denom):
from_dt = np.dtype(from_dt)
if to_dt is not None:
to_dt = np.dtype(to_dt)
# Test a few values for casting (results generated with NumPy 1.19)
values = np.array([-2**63, 1, 2**63-1, 10000, -10000, 2**32])
values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder))
assert values.dtype.byteorder == from_dt.byteorder
assert np.isnat(values.view(from_dt)[0])
DType = type(from_dt)
cast = get_castingimpl(DType, DType)
casting, (from_res, to_res) = cast._resolve_descriptors((from_dt, to_dt))
assert from_res is from_dt
assert to_res is to_dt or to_dt is None
assert casting == expected_casting
if nom is not None:
expected_out = (values * nom // denom).view(to_res)
expected_out[0] = "NaT"
else:
expected_out = np.empty_like(values)
expected_out[...] = denom
expected_out = expected_out.view(to_dt)
orig_arr = values.view(from_dt)
orig_out = np.empty_like(expected_out)
if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"):
# Casting from non-generic to generic units is an error and should
# probably be reported as an invalid cast earlier.
with pytest.raises(ValueError):
cast._simple_strided_call((orig_arr, orig_out))
return
        for aligned in [True, False]:
            for contig in [True, False]:
arr, out = self.get_data_variation(
orig_arr, orig_out, aligned, contig)
out[...] = 0
cast._simple_strided_call((arr, out))
assert_array_equal(out.view("int64"), expected_out.view("int64"))
def string_with_modified_length(self, dtype, change_length):
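        # Return a string dtype of the same kind and byte order as `dtype`,
        # with its length (in characters) changed by `change_length`.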
fact = 1 if dtype.char == "S" else 4
length = dtype.itemsize // fact + change_length
return np.dtype(f"{dtype.byteorder}{dtype.char}{length}")
@pytest.mark.parametrize("other_DT", simple_dtypes)
@pytest.mark.parametrize("string_char", ["S", "U"])
def test_string_cancast(self, other_DT, string_char):
fact = 1 if string_char == "S" else 4
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(other_DT, string_DT)
other_dt = other_DT()
expected_length = get_expected_stringlength(other_dt)
string_dt = np.dtype(f"{string_char}{expected_length}")
safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None))
assert res_dt.itemsize == expected_length * fact
        assert safety == Casting.safe  # we consider casts to string "safe"
assert isinstance(res_dt, string_DT)
        # These casts currently implement changing the string length, so
        # check the cast-safety for too short, matching, and too long string lengths:
for change_length in [-1, 0, 1]:
if change_length >= 0:
expected_safety = Casting.safe
else:
expected_safety = Casting.same_kind
to_dt = self.string_with_modified_length(string_dt, change_length)
safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt))
assert res_dt is to_dt
assert safety == expected_safety
# The opposite direction is always considered unsafe:
cast = get_castingimpl(string_DT, other_DT)
safety, _ = cast._resolve_descriptors((string_dt, other_dt))
assert safety == Casting.unsafe
cast = get_castingimpl(string_DT, other_DT)
safety, (_, res_dt) = cast._resolve_descriptors((string_dt, None))
assert safety == Casting.unsafe
assert other_dt is res_dt # returns the singleton for simple dtypes
@pytest.mark.parametrize("string_char", ["S", "U"])
@pytest.mark.parametrize("other_dt", simple_dtype_instances())
def test_simple_string_casts_roundtrip(self, other_dt, string_char):
"""
Tests casts from and to string by checking the roundtripping property.
The test also covers some string to string casts (but not all).
If this test creates issues, it should possibly just be simplified
or even removed (checking whether unaligned/non-contiguous casts give
the same results is useful, though).
"""
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(type(other_dt), string_DT)
cast_back = get_castingimpl(string_DT, type(other_dt))
_, (res_other_dt, string_dt) = cast._resolve_descriptors((other_dt, None))
if res_other_dt is not other_dt:
# do not support non-native byteorder, skip test in that case
assert other_dt.byteorder != res_other_dt.byteorder
return
orig_arr, values = self.get_data(other_dt, None)
str_arr = np.zeros(len(orig_arr), dtype=string_dt)
string_dt_short = self.string_with_modified_length(string_dt, -1)
str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short)
string_dt_long = self.string_with_modified_length(string_dt, 1)
str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long)
assert not cast._supports_unaligned # if support is added, should test
assert not cast_back._supports_unaligned
for contig in [True, False]:
other_arr, str_arr = self.get_data_variation(
orig_arr, str_arr, True, contig)
_, str_arr_short = self.get_data_variation(
orig_arr, str_arr_short.copy(), True, contig)
_, str_arr_long = self.get_data_variation(
orig_arr, str_arr_long, True, contig)
cast._simple_strided_call((other_arr, str_arr))
cast._simple_strided_call((other_arr, str_arr_short))
assert_array_equal(str_arr.astype(string_dt_short), str_arr_short)
cast._simple_strided_call((other_arr, str_arr_long))
assert_array_equal(str_arr, str_arr_long)
if other_dt.kind == "b":
# Booleans do not roundtrip
continue
other_arr[...] = 0
cast_back._simple_strided_call((str_arr, other_arr))
assert_array_equal(orig_arr, other_arr)
other_arr[...] = 0
cast_back._simple_strided_call((str_arr_long, other_arr))
assert_array_equal(orig_arr, other_arr)
@pytest.mark.parametrize("other_dt", ["S8", "<U8", ">U8"])
@pytest.mark.parametrize("string_char", ["S", "U"])
def test_string_to_string_cancast(self, other_dt, string_char):
other_dt = np.dtype(other_dt)
fact = 1 if string_char == "S" else 4
div = 1 if other_dt.char == "S" else 4
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(type(other_dt), string_DT)
expected_length = other_dt.itemsize // div
string_dt = np.dtype(f"{string_char}{expected_length}")
safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None))
assert res_dt.itemsize == expected_length * fact
assert isinstance(res_dt, string_DT)
if other_dt.char == string_char:
if other_dt.isnative:
expected_safety = Casting.no | Casting.cast_is_view
else:
expected_safety = Casting.equiv
elif string_char == "U":
expected_safety = Casting.safe
else:
expected_safety = Casting.unsafe
assert expected_safety == safety
for change_length in [-1, 0, 1]:
to_dt = self.string_with_modified_length(string_dt, change_length)
safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt))
assert res_dt is to_dt
if expected_safety == Casting.unsafe:
assert safety == expected_safety
elif change_length < 0:
assert safety == Casting.same_kind
elif change_length == 0:
assert safety == expected_safety
elif change_length > 0:
assert safety == Casting.safe
@pytest.mark.parametrize("order1", [">", "<"])
@pytest.mark.parametrize("order2", [">", "<"])
def test_unicode_byteswapped_cast(self, order1, order2):
        # Very specific tests (not using the castingimpl directly)
        # that test unicode byteswaps, including for unaligned array data.
dtype1 = np.dtype(f"{order1}U30")
dtype2 = np.dtype(f"{order2}U30")
data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1)
data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2)
if dtype1.alignment != 1:
# alignment should always be >1, but skip the check if not
assert not data1.flags.aligned
assert not data2.flags.aligned
element = "this is a ünicode string‽"
data1[()] = element
# Test both `data1` and `data1.copy()` (which should be aligned)
for data in [data1, data1.copy()]:
            data2[...] = data
assert data2[()] == element
assert data2.copy()[()] == element
def test_void_to_string_special_case(self):
# Cover a small special case in void to string casting that could
# probably just as well be turned into an error (compare
# `test_object_to_parametric_internal_error` below).
assert np.array([], dtype="V5").astype("S").dtype.itemsize == 5
assert np.array([], dtype="V5").astype("U").dtype.itemsize == 4 * 5
def test_object_to_parametric_internal_error(self):
# We reject casting from object to a parametric type, without
# figuring out the correct instance first.
object_dtype = type(np.dtype(object))
other_dtype = type(np.dtype(str))
cast = get_castingimpl(object_dtype, other_dtype)
with pytest.raises(TypeError,
match="casting from object to the parametric DType"):
cast._resolve_descriptors((np.dtype("O"), None))
@pytest.mark.parametrize("casting", ["no", "unsafe"])
def test_void_and_structured_with_subarray(self, casting):
# test case corresponding to gh-19325
dtype = np.dtype([("foo", "<f4", (3, 2))])
expected = casting == "unsafe"
assert np.can_cast("V4", dtype, casting=casting) == expected
assert np.can_cast(dtype, "V4", casting=casting) == expected
| 41.057576
| 84
| 0.565983
|
import pytest
import textwrap
import enum
import itertools
import random
import numpy as np
from numpy.lib.stride_tricks import as_strided
from numpy.testing import assert_array_equal
from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
simple_dtypes = "?bhilqBHILQefdFD"
if np.dtype("l").itemsize != np.dtype("q").itemsize:
simple_dtypes = simple_dtypes.replace("l", "").replace("L", "")
simple_dtypes = [type(np.dtype(c)) for c in simple_dtypes]
def simple_dtype_instances():
for dtype_class in simple_dtypes:
dt = dtype_class()
yield pytest.param(dt, id=str(dt))
if dt.byteorder != "|":
dt = dt.newbyteorder()
yield pytest.param(dt, id=str(dt))
def get_expected_stringlength(dtype):
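    # Expected string length (in characters) when casting the given dtype to
    # a string dtype, i.e. long enough to hold any formatted value.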
if dtype == np.bool_:
return 5
if dtype.kind in "iu":
if dtype.itemsize == 1:
length = 3
elif dtype.itemsize == 2:
length = 5
elif dtype.itemsize == 4:
length = 10
elif dtype.itemsize == 8:
length = 20
else:
raise AssertionError(f"did not find expected length for {dtype}")
if dtype.kind == "i":
length += 1
return length
if dtype.char == "g":
return 48
elif dtype.char == "G":
return 48 * 2
elif dtype.kind == "f":
return 32 # also for half apparently.
elif dtype.kind == "c":
return 32 * 2
raise AssertionError(f"did not find expected length for {dtype}")
class Casting(enum.IntEnum):
no = 0
equiv = 1
safe = 2
same_kind = 3
unsafe = 4
cast_is_view = 1 << 16
def _get_cancast_table():
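    # Expected cast safety between the simple DTypes; the symbols map to
    # Casting levels via `convert_cast` below
    # ("#" equiv, "=" safe, "~" same_kind, "." unsafe).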
table = textwrap.dedent("""
X ? b h i l q B H I L Q e f d g F D G S U V O M m
? # = = = = = = = = = = = = = = = = = = = = = . =
b . # = = = = . . . . . = = = = = = = = = = = . =
h . ~ # = = = . . . . . ~ = = = = = = = = = = . =
i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . =
l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
B . ~ = = = = # = = = = = = = = = = = = = = = . =
H . ~ ~ = = = ~ # = = = ~ = = = = = = = = = = . =
I . ~ ~ ~ = = ~ ~ # = = ~ ~ = = ~ = = = = = = . =
L . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
Q . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
e . . . . . . . . . . . # = = = = = = = = = = . .
f . . . . . . . . . . . ~ # = = = = = = = = = . .
d . . . . . . . . . . . ~ ~ # = ~ = = = = = = . .
g . . . . . . . . . . . ~ ~ ~ # ~ ~ = = = = = . .
F . . . . . . . . . . . . . . . # = = = = = = . .
D . . . . . . . . . . . . . . . ~ # = = = = = . .
G . . . . . . . . . . . . . . . ~ ~ # = = = = . .
S . . . . . . . . . . . . . . . . . . # = = = . .
U . . . . . . . . . . . . . . . . . . . # = = . .
V . . . . . . . . . . . . . . . . . . . . # = . .
O . . . . . . . . . . . . . . . . . . . . = # . .
M . . . . . . . . . . . . . . . . . . . . = = # .
m . . . . . . . . . . . . . . . . . . . . = = . #
""").strip().split("\n")
dtypes = [type(np.dtype(c)) for c in table[0][2::2]]
convert_cast = {".": Casting.unsafe, "~": Casting.same_kind,
"=": Casting.safe, "#": Casting.equiv,
" ": -1}
cancast = {}
for from_dt, row in zip(dtypes, table[1:]):
cancast[from_dt] = {}
for to_dt, c in zip(dtypes, row[2::2]):
cancast[from_dt][to_dt] = convert_cast[c]
return cancast
CAST_TABLE = _get_cancast_table()
class TestChanges:
@pytest.mark.parametrize("string", ["S", "U"])
@pytest.mark.parametrize("floating", ["e", "f", "d", "g"])
def test_float_to_string(self, floating, string):
assert np.can_cast(floating, string)
# 100 is long enough to hold any formatted floating
assert np.can_cast(floating, f"{string}100")
def test_to_void(self):
# But in general, we do consider these safe:
assert np.can_cast("d", "V")
assert np.can_cast("S20", "V")
        # Do not consider it a safe cast if the void is too small:
assert not np.can_cast("d", "V1")
assert not np.can_cast("S20", "V1")
assert not np.can_cast("U1", "V1")
# Structured to unstructured is just like any other:
assert np.can_cast("d,i", "V", casting="same_kind")
# Unstructured void to unstructured is actually no cast at all:
assert np.can_cast("V3", "V", casting="no")
assert np.can_cast("V0", "V", casting="no")
class TestCasting:
size = 1500 # Best larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize
def get_data(self, dtype1, dtype2):
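        # Build aligned, contiguous test arrays filled (via item assignment,
        # not casting) with small random integers; also return the values list.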
if dtype2 is None or dtype1.itemsize >= dtype2.itemsize:
length = self.size // dtype1.itemsize
else:
length = self.size // dtype2.itemsize
# Assume that the base array is well enough aligned for all inputs.
arr1 = np.empty(length, dtype=dtype1)
assert arr1.flags.c_contiguous
assert arr1.flags.aligned
values = [random.randrange(-128, 128) for _ in range(length)]
for i, value in enumerate(values):
# Use item assignment to ensure this is not using casting:
arr1[i] = value
if dtype2 is None:
if dtype1.char == "?":
values = [bool(v) for v in values]
return arr1, values
if dtype2.char == "?":
values = [bool(v) for v in values]
arr2 = np.empty(length, dtype=dtype2)
assert arr2.flags.c_contiguous
assert arr2.flags.aligned
for i, value in enumerate(values):
# Use item assignment to ensure this is not using casting:
arr2[i] = value
return arr1, arr2, values
def get_data_variation(self, arr1, arr2, aligned=True, contig=True):
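        # Copy arr1/arr2 into fresh byte buffers as strided views that are
        # optionally non-contiguous and/or deliberately misaligned.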
if contig:
stride1 = arr1.dtype.itemsize
stride2 = arr2.dtype.itemsize
elif aligned:
stride1 = 2 * arr1.dtype.itemsize
stride2 = 2 * arr2.dtype.itemsize
else:
stride1 = arr1.dtype.itemsize + 1
stride2 = arr2.dtype.itemsize + 1
max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1
max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1
from_bytes = np.zeros(max_size1, dtype=np.uint8)
to_bytes = np.zeros(max_size2, dtype=np.uint8)
# Sanity check that the above is large enough:
assert stride1 * len(arr1) <= from_bytes.nbytes
assert stride2 * len(arr2) <= to_bytes.nbytes
if aligned:
new1 = as_strided(from_bytes[:-1].view(arr1.dtype),
arr1.shape, (stride1,))
new2 = as_strided(to_bytes[:-1].view(arr2.dtype),
arr2.shape, (stride2,))
else:
new1 = as_strided(from_bytes[1:].view(arr1.dtype),
arr1.shape, (stride1,))
new2 = as_strided(to_bytes[1:].view(arr2.dtype),
arr2.shape, (stride2,))
new1[...] = arr1
if not contig:
# Ensure we did not overwrite bytes that should not be written:
offset = arr1.dtype.itemsize if aligned else 0
buf = from_bytes[offset::stride1].tobytes()
assert buf.count(b"\0") == len(buf)
if contig:
assert new1.flags.c_contiguous
assert new2.flags.c_contiguous
else:
assert not new1.flags.c_contiguous
assert not new2.flags.c_contiguous
if aligned:
assert new1.flags.aligned
assert new2.flags.aligned
else:
assert not new1.flags.aligned or new1.dtype.alignment == 1
assert not new2.flags.aligned or new2.dtype.alignment == 1
return new1, new2
@pytest.mark.parametrize("from_Dt", simple_dtypes)
def test_simple_cancast(self, from_Dt):
for to_Dt in simple_dtypes:
cast = get_castingimpl(from_Dt, to_Dt)
for from_dt in [from_Dt(), from_Dt().newbyteorder()]:
default = cast._resolve_descriptors((from_dt, None))[1][1]
assert default == to_Dt()
del default
for to_dt in [to_Dt(), to_Dt().newbyteorder()]:
casting, (from_res, to_res) = cast._resolve_descriptors(
(from_dt, to_dt))
assert(type(from_res) == from_Dt)
assert(type(to_res) == to_Dt)
if casting & Casting.cast_is_view:
# If a view is acceptable, this is "no" casting
# and byte order must be matching.
assert casting == Casting.no | Casting.cast_is_view
# The above table lists this as "equivalent"
assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt]
# Note that to_res may not be the same as from_dt
assert from_res.isnative == to_res.isnative
else:
if from_Dt == to_Dt:
# Note that to_res may not be the same as from_dt
assert from_res.isnative != to_res.isnative
assert casting == CAST_TABLE[from_Dt][to_Dt]
if from_Dt is to_Dt:
assert(from_dt is from_res)
assert(to_dt is to_res)
@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
@pytest.mark.parametrize("from_dt", simple_dtype_instances())
def test_simple_direct_casts(self, from_dt):
for to_dt in simple_dtype_instances():
to_dt = to_dt.values[0]
cast = get_castingimpl(type(from_dt), type(to_dt))
casting, (from_res, to_res) = cast._resolve_descriptors(
(from_dt, to_dt))
if from_res is not from_dt or to_res is not to_dt:
# Do not test this case, it is handled in multiple steps,
                # each of which should be tested individually.
return
safe = (casting & ~Casting.cast_is_view) <= Casting.safe
del from_res, to_res, casting
arr1, arr2, values = self.get_data(from_dt, to_dt)
cast._simple_strided_call((arr1, arr2))
# Check via python list
assert arr2.tolist() == values
# Check that the same results are achieved for strided loops
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
# Check if alignment makes a difference, but only if supported
# and only if the alignment can be wrong
if ((from_dt.alignment == 1 and to_dt.alignment == 1) or
not cast._supports_unaligned):
return
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
del arr1_o, arr2_o, cast
@pytest.mark.parametrize("from_Dt", simple_dtypes)
def test_numeric_to_times(self, from_Dt):
# We currently only implement contiguous loops, so only need to
# test those.
from_dt = from_Dt()
time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"),
np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")]
for time_dt in time_dtypes:
cast = get_castingimpl(type(from_dt), type(time_dt))
casting, (from_res, to_res) = cast._resolve_descriptors(
(from_dt, time_dt))
assert from_res is from_dt
assert to_res is time_dt
del from_res, to_res
assert(casting & CAST_TABLE[from_Dt][type(time_dt)])
int64_dt = np.dtype(np.int64)
arr1, arr2, values = self.get_data(from_dt, int64_dt)
arr2 = arr2.view(time_dt)
arr2[...] = np.datetime64("NaT")
if time_dt == np.dtype("M8"):
# This is a bit of a strange path, and could probably be removed
arr1[-1] = 0 # ensure at least one value is not NaT
# The cast currently succeeds, but the values are invalid:
cast._simple_strided_call((arr1, arr2))
with pytest.raises(ValueError):
str(arr2[-1]) # e.g. conversion to string fails
return
cast._simple_strided_call((arr1, arr2))
assert [int(v) for v in arr2.tolist()] == values
# Check that the same results are achieved for strided loops
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
@pytest.mark.parametrize(
["from_dt", "to_dt", "expected_casting", "nom", "denom"],
[("M8[ns]", None,
Casting.no | Casting.cast_is_view, 1, 1),
(str(np.dtype("M8[ns]").newbyteorder()), None, Casting.equiv, 1, 1),
("M8", "M8[ms]", Casting.safe | Casting.cast_is_view, 1, 1),
("M8[ms]", "M8", Casting.unsafe, 1, 1), # should be invalid cast
("M8[5ms]", "M8[5ms]", Casting.no | Casting.cast_is_view, 1, 1),
("M8[ns]", "M8[ms]", Casting.same_kind, 1, 10**6),
("M8[ms]", "M8[ns]", Casting.safe, 10**6, 1),
("M8[ms]", "M8[7ms]", Casting.same_kind, 1, 7),
("M8[4D]", "M8[1M]", Casting.same_kind, None,
# give full values based on NumPy 1.19.x
[-2**63, 0, -1, 1314, -1315, 564442610]),
("m8[ns]", None, Casting.no | Casting.cast_is_view, 1, 1),
(str(np.dtype("m8[ns]").newbyteorder()), None, Casting.equiv, 1, 1),
("m8", "m8[ms]", Casting.safe | Casting.cast_is_view, 1, 1),
("m8[ms]", "m8", Casting.unsafe, 1, 1), # should be invalid cast
("m8[5ms]", "m8[5ms]", Casting.no | Casting.cast_is_view, 1, 1),
("m8[ns]", "m8[ms]", Casting.same_kind, 1, 10**6),
("m8[ms]", "m8[ns]", Casting.safe, 10**6, 1),
("m8[ms]", "m8[7ms]", Casting.same_kind, 1, 7),
("m8[4D]", "m8[1M]", Casting.unsafe, None,
# give full values based on NumPy 1.19.x
[-2**63, 0, 0, 1314, -1315, 564442610])])
def test_time_to_time(self, from_dt, to_dt, expected_casting, nom, denom):
from_dt = np.dtype(from_dt)
if to_dt is not None:
to_dt = np.dtype(to_dt)
# Test a few values for casting (results generated with NumPy 1.19)
values = np.array([-2**63, 1, 2**63-1, 10000, -10000, 2**32])
values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder))
assert values.dtype.byteorder == from_dt.byteorder
assert np.isnat(values.view(from_dt)[0])
DType = type(from_dt)
cast = get_castingimpl(DType, DType)
casting, (from_res, to_res) = cast._resolve_descriptors((from_dt, to_dt))
assert from_res is from_dt
assert to_res is to_dt or to_dt is None
assert casting == expected_casting
if nom is not None:
expected_out = (values * nom // denom).view(to_res)
expected_out[0] = "NaT"
else:
expected_out = np.empty_like(values)
expected_out[...] = denom
expected_out = expected_out.view(to_dt)
orig_arr = values.view(from_dt)
orig_out = np.empty_like(expected_out)
if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"):
# Casting from non-generic to generic units is an error and should
# probably be reported as an invalid cast earlier.
with pytest.raises(ValueError):
cast._simple_strided_call((orig_arr, orig_out))
return
        for aligned in [True, False]:
            for contig in [True, False]:
arr, out = self.get_data_variation(
orig_arr, orig_out, aligned, contig)
out[...] = 0
cast._simple_strided_call((arr, out))
assert_array_equal(out.view("int64"), expected_out.view("int64"))
def string_with_modified_length(self, dtype, change_length):
fact = 1 if dtype.char == "S" else 4
length = dtype.itemsize // fact + change_length
return np.dtype(f"{dtype.byteorder}{dtype.char}{length}")
@pytest.mark.parametrize("other_DT", simple_dtypes)
@pytest.mark.parametrize("string_char", ["S", "U"])
def test_string_cancast(self, other_DT, string_char):
fact = 1 if string_char == "S" else 4
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(other_DT, string_DT)
other_dt = other_DT()
expected_length = get_expected_stringlength(other_dt)
string_dt = np.dtype(f"{string_char}{expected_length}")
safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None))
assert res_dt.itemsize == expected_length * fact
        assert safety == Casting.safe  # we consider casts to string "safe"
assert isinstance(res_dt, string_DT)
        # These casts currently implement changing the string length, so
        # check the cast-safety for too short, matching, and too long string lengths:
for change_length in [-1, 0, 1]:
if change_length >= 0:
expected_safety = Casting.safe
else:
expected_safety = Casting.same_kind
to_dt = self.string_with_modified_length(string_dt, change_length)
safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt))
assert res_dt is to_dt
assert safety == expected_safety
# The opposite direction is always considered unsafe:
cast = get_castingimpl(string_DT, other_DT)
safety, _ = cast._resolve_descriptors((string_dt, other_dt))
assert safety == Casting.unsafe
cast = get_castingimpl(string_DT, other_DT)
safety, (_, res_dt) = cast._resolve_descriptors((string_dt, None))
assert safety == Casting.unsafe
assert other_dt is res_dt # returns the singleton for simple dtypes
@pytest.mark.parametrize("string_char", ["S", "U"])
@pytest.mark.parametrize("other_dt", simple_dtype_instances())
def test_simple_string_casts_roundtrip(self, other_dt, string_char):
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(type(other_dt), string_DT)
cast_back = get_castingimpl(string_DT, type(other_dt))
_, (res_other_dt, string_dt) = cast._resolve_descriptors((other_dt, None))
if res_other_dt is not other_dt:
# do not support non-native byteorder, skip test in that case
assert other_dt.byteorder != res_other_dt.byteorder
return
orig_arr, values = self.get_data(other_dt, None)
str_arr = np.zeros(len(orig_arr), dtype=string_dt)
string_dt_short = self.string_with_modified_length(string_dt, -1)
str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short)
string_dt_long = self.string_with_modified_length(string_dt, 1)
str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long)
assert not cast._supports_unaligned # if support is added, should test
assert not cast_back._supports_unaligned
for contig in [True, False]:
other_arr, str_arr = self.get_data_variation(
orig_arr, str_arr, True, contig)
_, str_arr_short = self.get_data_variation(
orig_arr, str_arr_short.copy(), True, contig)
_, str_arr_long = self.get_data_variation(
orig_arr, str_arr_long, True, contig)
cast._simple_strided_call((other_arr, str_arr))
cast._simple_strided_call((other_arr, str_arr_short))
assert_array_equal(str_arr.astype(string_dt_short), str_arr_short)
cast._simple_strided_call((other_arr, str_arr_long))
assert_array_equal(str_arr, str_arr_long)
if other_dt.kind == "b":
# Booleans do not roundtrip
continue
other_arr[...] = 0
cast_back._simple_strided_call((str_arr, other_arr))
assert_array_equal(orig_arr, other_arr)
other_arr[...] = 0
cast_back._simple_strided_call((str_arr_long, other_arr))
assert_array_equal(orig_arr, other_arr)
@pytest.mark.parametrize("other_dt", ["S8", "<U8", ">U8"])
@pytest.mark.parametrize("string_char", ["S", "U"])
def test_string_to_string_cancast(self, other_dt, string_char):
other_dt = np.dtype(other_dt)
fact = 1 if string_char == "S" else 4
div = 1 if other_dt.char == "S" else 4
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(type(other_dt), string_DT)
expected_length = other_dt.itemsize // div
string_dt = np.dtype(f"{string_char}{expected_length}")
safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None))
assert res_dt.itemsize == expected_length * fact
assert isinstance(res_dt, string_DT)
if other_dt.char == string_char:
if other_dt.isnative:
expected_safety = Casting.no | Casting.cast_is_view
else:
expected_safety = Casting.equiv
elif string_char == "U":
expected_safety = Casting.safe
else:
expected_safety = Casting.unsafe
assert expected_safety == safety
for change_length in [-1, 0, 1]:
to_dt = self.string_with_modified_length(string_dt, change_length)
safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt))
assert res_dt is to_dt
if expected_safety == Casting.unsafe:
assert safety == expected_safety
elif change_length < 0:
assert safety == Casting.same_kind
elif change_length == 0:
assert safety == expected_safety
elif change_length > 0:
assert safety == Casting.safe
@pytest.mark.parametrize("order1", [">", "<"])
@pytest.mark.parametrize("order2", [">", "<"])
def test_unicode_byteswapped_cast(self, order1, order2):
        # Very specific tests (not using the castingimpl directly)
        # that test unicode byteswaps, including for unaligned array data.
dtype1 = np.dtype(f"{order1}U30")
dtype2 = np.dtype(f"{order2}U30")
data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1)
data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2)
if dtype1.alignment != 1:
# alignment should always be >1, but skip the check if not
assert not data1.flags.aligned
assert not data2.flags.aligned
element = "this is a ünicode string‽"
data1[()] = element
# Test both `data1` and `data1.copy()` (which should be aligned)
for data in [data1, data1.copy()]:
            data2[...] = data
assert data2[()] == element
assert data2.copy()[()] == element
def test_void_to_string_special_case(self):
# Cover a small special case in void to string casting that could
# probably just as well be turned into an error (compare
# `test_object_to_parametric_internal_error` below).
assert np.array([], dtype="V5").astype("S").dtype.itemsize == 5
assert np.array([], dtype="V5").astype("U").dtype.itemsize == 4 * 5
def test_object_to_parametric_internal_error(self):
# We reject casting from object to a parametric type, without
# figuring out the correct instance first.
object_dtype = type(np.dtype(object))
other_dtype = type(np.dtype(str))
cast = get_castingimpl(object_dtype, other_dtype)
with pytest.raises(TypeError,
match="casting from object to the parametric DType"):
cast._resolve_descriptors((np.dtype("O"), None))
@pytest.mark.parametrize("casting", ["no", "unsafe"])
def test_void_and_structured_with_subarray(self, casting):
# test case corresponding to gh-19325
dtype = np.dtype([("foo", "<f4", (3, 2))])
expected = casting == "unsafe"
assert np.can_cast("V4", dtype, casting=casting) == expected
assert np.can_cast(dtype, "V4", casting=casting) == expected
| true
| true
|
1c46600ef51420118bf2adf803f33064109e861f
| 2,286
|
py
|
Python
|
venv/Lib/site-packages/tests/test_310_ClientInfo.py
|
shehzadulislam/Assignment4
|
a9cced70be6ae5d2685027d68032d5849f638301
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/tests/test_310_ClientInfo.py
|
shehzadulislam/Assignment4
|
a9cced70be6ae5d2685027d68032d5849f638301
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/tests/test_310_ClientInfo.py
|
shehzadulislam/Assignment4
|
a9cced70be6ae5d2685027d68032d5849f638301
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed Materials - Property of IBM
#
# (c) Copyright IBM Corp. 2007-2008
#
import unittest, sys
import ibm_db
import config
from testfunctions import IbmDbTestFunctions
class IbmDbTestCase(unittest.TestCase):
def test_310_ClientInfo(self):
obj = IbmDbTestFunctions()
obj.assert_expectf(self.run_test_310)
def run_test_310(self):
conn = ibm_db.connect(config.database, config.user, config.password)
client = ibm_db.client_info(conn)
if client:
print("DRIVER_NAME: string(%d) \"%s\"" % (len(client.DRIVER_NAME), client.DRIVER_NAME))
print("DRIVER_VER: string(%d) \"%s\"" % (len(client.DRIVER_VER), client.DRIVER_VER))
print("DATA_SOURCE_NAME: string(%d) \"%s\"" % (len(client.DATA_SOURCE_NAME), client.DATA_SOURCE_NAME))
print("DRIVER_ODBC_VER: string(%d) \"%s\"" % (len(client.DRIVER_ODBC_VER), client.DRIVER_ODBC_VER))
print("ODBC_VER: string(%d) \"%s\"" % (len(client.ODBC_VER), client.ODBC_VER))
print("ODBC_SQL_CONFORMANCE: string(%d) \"%s\"" % (len(client.ODBC_SQL_CONFORMANCE), client.ODBC_SQL_CONFORMANCE))
print("APPL_CODEPAGE: int(%s)" % client.APPL_CODEPAGE)
print("CONN_CODEPAGE: int(%s)" % client.CONN_CODEPAGE)
ibm_db.close(conn)
else:
print("Error.")
#__END__
#__LUW_EXPECTED__
#DRIVER_NAME: string(%d) %s
#DRIVER_VER: string(%d) %s
#DATA_SOURCE_NAME: string(%d) %s
#DRIVER_ODBC_VER: string(%d) %s
#ODBC_VER: string(%d) %s
#ODBC_SQL_CONFORMANCE: string(%d) %s
#APPL_CODEPAGE: int(%d)
#CONN_CODEPAGE: int(%d)
#__ZOS_EXPECTED__
#DRIVER_NAME: string(%d) %s
#DRIVER_VER: string(%d) %s
#DATA_SOURCE_NAME: string(%d) %s
#DRIVER_ODBC_VER: string(%d) %s
#ODBC_VER: string(%d) %s
#ODBC_SQL_CONFORMANCE: string(%d) %s
#APPL_CODEPAGE: int(%d)
#CONN_CODEPAGE: int(%d)
#__SYSTEMI_EXPECTED__
#DRIVER_NAME: string(%d) %s
#DRIVER_VER: string(%d) %s
#DATA_SOURCE_NAME: string(%d) %s
#DRIVER_ODBC_VER: string(%d) %s
#ODBC_VER: string(%d) %s
#ODBC_SQL_CONFORMANCE: string(%d) %s
#APPL_CODEPAGE: int(%d)
#CONN_CODEPAGE: int(%d)
#__IDS_EXPECTED__
#DRIVER_NAME: string(%d) %s
#DRIVER_VER: string(%d) %s
#DATA_SOURCE_NAME: string(%d) %s
#DRIVER_ODBC_VER: string(%d) %s
#ODBC_VER: string(%d) %s
#ODBC_SQL_CONFORMANCE: string(%d) %s
#APPL_CODEPAGE: int(%d)
#CONN_CODEPAGE: int(%d)
| 30.891892
| 120
| 0.700787
|
import unittest, sys
import ibm_db
import config
from testfunctions import IbmDbTestFunctions
class IbmDbTestCase(unittest.TestCase):
def test_310_ClientInfo(self):
obj = IbmDbTestFunctions()
obj.assert_expectf(self.run_test_310)
def run_test_310(self):
conn = ibm_db.connect(config.database, config.user, config.password)
client = ibm_db.client_info(conn)
if client:
print("DRIVER_NAME: string(%d) \"%s\"" % (len(client.DRIVER_NAME), client.DRIVER_NAME))
print("DRIVER_VER: string(%d) \"%s\"" % (len(client.DRIVER_VER), client.DRIVER_VER))
print("DATA_SOURCE_NAME: string(%d) \"%s\"" % (len(client.DATA_SOURCE_NAME), client.DATA_SOURCE_NAME))
print("DRIVER_ODBC_VER: string(%d) \"%s\"" % (len(client.DRIVER_ODBC_VER), client.DRIVER_ODBC_VER))
print("ODBC_VER: string(%d) \"%s\"" % (len(client.ODBC_VER), client.ODBC_VER))
print("ODBC_SQL_CONFORMANCE: string(%d) \"%s\"" % (len(client.ODBC_SQL_CONFORMANCE), client.ODBC_SQL_CONFORMANCE))
print("APPL_CODEPAGE: int(%s)" % client.APPL_CODEPAGE)
print("CONN_CODEPAGE: int(%s)" % client.CONN_CODEPAGE)
ibm_db.close(conn)
else:
print("Error.")
| true
| true
|
1c4660eee4c36b65b45ca71a3dfd9c51e6edccdc
| 1,545
|
py
|
Python
|
postprocessing.py
|
BaerkeDestroyer/tiktok-rss-flat
|
ec96d901b5d40c0563658c469a6308546e78d0e2
|
[
"Apache-2.0"
] | null | null | null |
postprocessing.py
|
BaerkeDestroyer/tiktok-rss-flat
|
ec96d901b5d40c0563658c469a6308546e78d0e2
|
[
"Apache-2.0"
] | null | null | null |
postprocessing.py
|
BaerkeDestroyer/tiktok-rss-flat
|
ec96d901b5d40c0563658c469a6308546e78d0e2
|
[
"Apache-2.0"
] | null | null | null |
from TikTokApi import TikTokApi
import csv
from feedgen.feed import FeedGenerator
from datetime import datetime, timezone
# Normal GitHub Pages URL
# ghPagesURL = "https://conoro.github.io/tiktok-rss-flat/"
# Custom Domain
ghPagesURL = "https://baerkedestroyer.github.io/tiktok-rss-flat/"
api = TikTokApi.get_instance()
count = 10
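# Generate one RSS feed per user listed in subscriptions.csv, containing
# that user's latest `count` TikToks.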
with open('subscriptions.csv') as f:
cf = csv.DictReader(f, fieldnames=['username'])
for row in cf:
user = row['username']
print (user)
tiktoks = api.by_username(user, count=count)
fg = FeedGenerator()
fg.id('https://www.tiktok.com/@' + user)
fg.title(user + ' TikTok')
fg.author( {'name':'Conor ONeill','email':'conor@conoroneill.com'} )
fg.link( href='http://tiktok.com', rel='alternate' )
fg.logo(ghPagesURL + '/tiktok-rss.png')
fg.subtitle('OK Boomer, all the latest TikToks from ' + user)
fg.link( href=ghPagesURL + 'rss/' + user + '.xml', rel='self' )
fg.language('en')
for tiktok in tiktoks:
fe = fg.add_entry()
link = "https://www.tiktok.com/@" + user + "/video/" + tiktok['id']
fe.id(link)
fe.published(datetime.fromtimestamp(tiktok['createTime'], timezone.utc))
fe.title(tiktok['desc'])
fe.link(href=link)
fe.description("<img src='" + tiktok['video']['cover'] + "' />")
fg.rss_file('rss/' + user + '.xml') # Write the RSS feed to a file
| 34.333333
| 85
| 0.579935
|
from TikTokApi import TikTokApi
import csv
from feedgen.feed import FeedGenerator
from datetime import datetime, timezone
ghPagesURL = "https://baerkedestroyer.github.io/tiktok-rss-flat/"
api = TikTokApi.get_instance()
count = 10
with open('subscriptions.csv') as f:
cf = csv.DictReader(f, fieldnames=['username'])
for row in cf:
user = row['username']
print (user)
tiktoks = api.by_username(user, count=count)
fg = FeedGenerator()
fg.id('https://www.tiktok.com/@' + user)
fg.title(user + ' TikTok')
fg.author( {'name':'Conor ONeill','email':'conor@conoroneill.com'} )
fg.link( href='http://tiktok.com', rel='alternate' )
fg.logo(ghPagesURL + '/tiktok-rss.png')
fg.subtitle('OK Boomer, all the latest TikToks from ' + user)
fg.link( href=ghPagesURL + 'rss/' + user + '.xml', rel='self' )
fg.language('en')
for tiktok in tiktoks:
fe = fg.add_entry()
link = "https://www.tiktok.com/@" + user + "/video/" + tiktok['id']
fe.id(link)
fe.published(datetime.fromtimestamp(tiktok['createTime'], timezone.utc))
fe.title(tiktok['desc'])
fe.link(href=link)
fe.description("<img src='" + tiktok['video']['cover'] + "' />")
fg.rss_file('rss/' + user + '.xml')
| true
| true
|
1c46616705638a9d0e9b20f08577b7cad14f9b79
| 459
|
py
|
Python
|
config.example.py
|
entuland/fogibot
|
e3afe14d53fe9d47178161d9311301c47c960507
|
[
"MIT"
] | null | null | null |
config.example.py
|
entuland/fogibot
|
e3afe14d53fe9d47178161d9311301c47c960507
|
[
"MIT"
] | null | null | null |
config.example.py
|
entuland/fogibot
|
e3afe14d53fe9d47178161d9311301c47c960507
|
[
"MIT"
] | null | null | null |
host = "chat.example.com"
port = 6697
username = "username"
password = "password"
botname = "botname"
realname = "realname"
owner = "owner"
trigger = botname
channels = [
"##" + botname,
]
sharing_bins = [
"cpy.pt (generic pastes), gist.github.com (multiple files pastes)",
"jsfiddle.net, codepen.io (HTML+CSS+JS IDEs)",
"ideone.com (runnable code - C, C++, Python etc.)",
"postimage.io (family safe images), pasteconf.net (conf files)"
]
| 25.5
| 71
| 0.657952
|
host = "chat.example.com"
port = 6697
username = "username"
password = "password"
botname = "botname"
realname = "realname"
owner = "owner"
trigger = botname
channels = [
"##" + botname,
]
sharing_bins = [
"cpy.pt (generic pastes), gist.github.com (multiple files pastes)",
"jsfiddle.net, codepen.io (HTML+CSS+JS IDEs)",
"ideone.com (runnable code - C, C++, Python etc.)",
"postimage.io (family safe images), pasteconf.net (conf files)"
]
| true
| true
|
1c466226c6dae77cdef9d5c22b9f63c343a0eb11
| 933
|
py
|
Python
|
bindings/python/src/test/test_package_dependencies.py
|
cloudsmith-io/cloudsmith-api
|
bc747fa6ee1d86485e334b08f65687630b3fd87c
|
[
"Apache-2.0"
] | 9
|
2018-07-02T15:21:40.000Z
|
2021-11-24T03:44:39.000Z
|
bindings/python/src/test/test_package_dependencies.py
|
cloudsmith-io/cloudsmith-api
|
bc747fa6ee1d86485e334b08f65687630b3fd87c
|
[
"Apache-2.0"
] | 8
|
2019-01-08T22:06:12.000Z
|
2022-03-16T15:02:37.000Z
|
bindings/python/src/test/test_package_dependencies.py
|
cloudsmith-io/cloudsmith-api
|
bc747fa6ee1d86485e334b08f65687630b3fd87c
|
[
"Apache-2.0"
] | 1
|
2021-12-06T19:08:05.000Z
|
2021-12-06T19:08:05.000Z
|
# coding: utf-8
"""
Cloudsmith API
The API to the Cloudsmith Service
OpenAPI spec version: v1
Contact: support@cloudsmith.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import cloudsmith_api
from cloudsmith_api.rest import ApiException
from cloudsmith_api.models.package_dependencies import PackageDependencies
class TestPackageDependencies(unittest.TestCase):
""" PackageDependencies unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testPackageDependencies(self):
"""
Test PackageDependencies
"""
# FIXME: construct object with mandatory attributes with example values
#model = cloudsmith_api.models.package_dependencies.PackageDependencies()
pass
if __name__ == '__main__':
unittest.main()
| 20.733333
| 81
| 0.713826
|
from __future__ import absolute_import
import os
import sys
import unittest
import cloudsmith_api
from cloudsmith_api.rest import ApiException
from cloudsmith_api.models.package_dependencies import PackageDependencies
class TestPackageDependencies(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testPackageDependencies(self):
pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c466290ee5308ecc91a711df4d496fe19a9680e
| 674
|
py
|
Python
|
manage.py
|
loyer-yuan/REMVocabulary
|
d86965600f1951c67558b8946bcfd6317d345153
|
[
"MIT"
] | 1
|
2021-12-09T09:26:23.000Z
|
2021-12-09T09:26:23.000Z
|
manage.py
|
loyer-yuan/REMVocabulary
|
d86965600f1951c67558b8946bcfd6317d345153
|
[
"MIT"
] | 1
|
2021-12-07T13:01:23.000Z
|
2021-12-12T13:53:47.000Z
|
manage.py
|
loyer-yuan/REMVocabulary
|
d86965600f1951c67558b8946bcfd6317d345153
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'REMVocabulary_DBMS.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.304348
| 82
| 0.683976
|
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'REMVocabulary_DBMS.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true
| true
|