hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acff06a19ab4fc8ef846bc9f0ec98a5957fe38ae
| 1,465
|
py
|
Python
|
Utils.py
|
spidertyler2005/OddDataHosting
|
56cc378868e0f70f3158e2d1dec39619a0bfc072
|
[
"MIT"
] | null | null | null |
Utils.py
|
spidertyler2005/OddDataHosting
|
56cc378868e0f70f3158e2d1dec39619a0bfc072
|
[
"MIT"
] | null | null | null |
Utils.py
|
spidertyler2005/OddDataHosting
|
56cc378868e0f70f3158e2d1dec39619a0bfc072
|
[
"MIT"
] | null | null | null |
import hashlib,json,rsa,base64,os
from cryptography.hazmat.backends import default_backend
from cryptography.fernet import Fernet,MultiFernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
def GenKey(filename,size, name, password = b"Passw0rd"):
    """Generate a paired local/remote key file set for *name*.

    Two random Fernet keys are generated and shared between both files.
    The local file (``Data/Keys/{filename}.key``) additionally stores a
    third key derived from *password* via PBKDF2-HMAC-SHA256; the remote
    file (``Data/Keys/Remote_{name}.key``) stores the KDF salt instead,
    so the third key can be re-derived from the password later.

    :param filename: basename (no extension) for the local key file
    :param size: unused; kept for backward compatibility with callers
    :param name: basename suffix for the remote key file
    :param password: bytes passphrase fed to the KDF
    """
    remote = Fernet.generate_key()+b"\n"+Fernet.generate_key()
    salt = os.urandom(16)
    kdf = PBKDF2HMAC(
        algorithm=hashes.SHA256(),
        length=32,
        salt=salt,
        iterations=100000,
        backend=default_backend()
    )
    # BUG FIX: the path previously contained no placeholder, so the
    # `filename` argument was ignored and every call overwrote the same
    # local key file.
    with open(f"Data/Keys/{filename}.key",'wb') as f:
        f.write(remote+b'\n'+base64.urlsafe_b64encode(kdf.derive(password)))
    with open(f"Data/Keys/Remote_{name}.key",'wb') as f:
        f.write(remote+b'\n'+salt)
def open_Key(name):
    """Read ``Data/Keys/{name}.key`` and return a MultiFernet built from
    the three newline-separated keys it contains."""
    with open(f"Data/Keys/{name}.key", 'rb') as key_file:
        parts = key_file.read().split(b'\n')
    # Exactly three keys are expected; an IndexError here means the key
    # file is malformed (same failure mode as the original code).
    return MultiFernet([Fernet(parts[0]), Fernet(parts[1]), Fernet(parts[2])])
def hashString(text):
    """Return the hexadecimal SHA-256 digest of *text* (a str)."""
    return hashlib.sha256(text.encode()).hexdigest()
def get_Config():
    """Parse ``Data/config.json`` and return its contents as a dict."""
    with open("Data/config.json", 'r') as config_file:
        return json.load(config_file)
class Colors:
    # ANSI escape sequences for colored/styled terminal output.
    # Print one of these before text and RESET after it.
    GREEN = "\u001b[32m"
    RED = "\u001b[31m"
    BRED = "\u001b[31;1m"  # bright/bold red
    CYAN = "\u001b[36m"
    YELLOW = "\u001b[33m"
    BOLD = "\u001b[1m"
    BMAGENTA = "\u001b[35;1m"  # bright/bold magenta
    RESET = "\u001b[0m"  # reset all color/style attributes
| 30.520833
| 76
| 0.645051
|
acff06ab0ab2e2e54bba7382146ca17d939ee6df
| 41,704
|
py
|
Python
|
tests/test_torch_agent.py
|
duongtrungkien/Parlai_Finnish
|
e0335b55138093f2a1fb214c73593cc97e71043f
|
[
"MIT"
] | null | null | null |
tests/test_torch_agent.py
|
duongtrungkien/Parlai_Finnish
|
e0335b55138093f2a1fb214c73593cc97e71043f
|
[
"MIT"
] | null | null | null |
tests/test_torch_agent.py
|
duongtrungkien/Parlai_Finnish
|
e0335b55138093f2a1fb214c73593cc97e71043f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Unit tests for TorchAgent.
"""
import os
import unittest
from parlai.core.agents import create_agent_from_shared
from parlai.utils.testing import tempdir
from parlai.utils.misc import Message
# Gate the torch-dependent imports: if torch (or the mock test agents that
# require it) cannot be imported, flag the whole module's tests for skipping
# instead of failing at import time.
SKIP_TESTS = False
try:
    from parlai.core.torch_agent import Output
    from parlai.agents.test_agents.test_agents import MockTorchAgent, MockDict
    import torch
except ImportError:
    SKIP_TESTS = True
def get_agent(**kwargs):
    r"""
    Return opt-initialized agent.

    :param kwargs: any kwargs you want to set using parser.set_params(\*\*kwargs)
    """
    # Default to CPU unless the caller explicitly asked for CUDA.
    kwargs.setdefault('no_cuda', True)
    from parlai.core.params import ParlaiParser

    argparser = ParlaiParser()
    MockTorchAgent.add_cmdline_args(argparser, partial_opt=None)
    argparser.set_params(**kwargs)
    options = argparser.parse_args([])
    return MockTorchAgent(options)
@unittest.skipIf(SKIP_TESTS, "Torch not installed.")
class TestTorchAgent(unittest.TestCase):
"""
Basic tests on the util functions in TorchAgent.
"""
def test_mock(self):
    """
    Just make sure we can instantiate a mock agent.
    """
    agent = get_agent()
    # assertIsInstance gives a clearer failure message than
    # assertTrue(isinstance(...)) while checking the same condition.
    self.assertIsInstance(agent.dict, MockDict)
def test_share(self):
    """
    Make sure share works and shares dictionary.
    """
    agent = get_agent()
    shared = agent.share()
    # assertIn reports the container contents on failure, unlike
    # assertTrue('dict' in shared).
    self.assertIn('dict', shared)
def test__vectorize_text(self):
    """
    Test _vectorize_text and its different options.

    The mock dictionary tokenizes the 3-word input to [1, 2, 3]; the
    assertions below exercise every combination of add_start/add_end,
    with and without truncation, truncating from the left (default) and
    from the right.
    """
    agent = get_agent()
    text = "I'm sorry, Dave"
    # test add_start and add_end
    vec = agent._vectorize_text(text, add_start=False, add_end=False)
    self.assertEqual(len(vec), 3)
    self.assertEqual(vec.tolist(), [1, 2, 3])
    vec = agent._vectorize_text(text, add_start=True, add_end=False)
    self.assertEqual(len(vec), 4)
    self.assertEqual(vec.tolist(), [MockDict.BEG_IDX, 1, 2, 3])
    vec = agent._vectorize_text(text, add_start=False, add_end=True)
    self.assertEqual(len(vec), 4)
    self.assertEqual(vec.tolist(), [1, 2, 3, MockDict.END_IDX])
    vec = agent._vectorize_text(text, add_start=True, add_end=True)
    self.assertEqual(len(vec), 5)
    self.assertEqual(vec.tolist(), [MockDict.BEG_IDX, 1, 2, 3, MockDict.END_IDX])
    # now do it again with truncation=3; truncation keeps the rightmost
    # tokens, so BEG_IDX is dropped first
    vec = agent._vectorize_text(text, add_start=False, add_end=False, truncate=3)
    self.assertEqual(len(vec), 3)
    self.assertEqual(vec.tolist(), [1, 2, 3])
    vec = agent._vectorize_text(text, add_start=True, add_end=False, truncate=3)
    self.assertEqual(len(vec), 3)
    self.assertEqual(vec.tolist(), [1, 2, 3])
    vec = agent._vectorize_text(text, add_start=False, add_end=True, truncate=3)
    self.assertEqual(len(vec), 3)
    self.assertEqual(vec.tolist(), [2, 3, MockDict.END_IDX])
    vec = agent._vectorize_text(text, add_start=True, add_end=True, truncate=3)
    self.assertEqual(len(vec), 3)
    self.assertEqual(vec.tolist(), [2, 3, MockDict.END_IDX])
    # now do it again with truncation=2
    vec = agent._vectorize_text(text, add_start=False, add_end=False, truncate=2)
    self.assertEqual(len(vec), 2)
    self.assertEqual(vec.tolist(), [2, 3])
    vec = agent._vectorize_text(text, add_start=True, add_end=False, truncate=2)
    self.assertEqual(len(vec), 2)
    self.assertEqual(vec.tolist(), [2, 3])
    vec = agent._vectorize_text(text, add_start=False, add_end=True, truncate=2)
    self.assertEqual(len(vec), 2)
    self.assertEqual(vec.tolist(), [3, MockDict.END_IDX])
    vec = agent._vectorize_text(text, add_start=True, add_end=True, truncate=2)
    self.assertEqual(len(vec), 2)
    self.assertEqual(vec.tolist(), [3, MockDict.END_IDX])
    # now do it again with truncation=2, don't truncate_left: the
    # rightmost tokens (including END_IDX) are dropped instead
    vec = agent._vectorize_text(
        text, add_start=False, add_end=False, truncate=2, truncate_left=False
    )
    self.assertEqual(len(vec), 2)
    self.assertEqual(vec.tolist(), [1, 2])
    vec = agent._vectorize_text(
        text, add_start=True, add_end=False, truncate=2, truncate_left=False
    )
    self.assertEqual(len(vec), 2)
    self.assertEqual(vec.tolist(), [MockDict.BEG_IDX, 1])
    vec = agent._vectorize_text(
        text, add_start=False, add_end=True, truncate=2, truncate_left=False
    )
    self.assertEqual(len(vec), 2)
    self.assertEqual(vec.tolist(), [1, 2])
    vec = agent._vectorize_text(
        text, add_start=True, add_end=True, truncate=2, truncate_left=False
    )
    self.assertEqual(len(vec), 2)
    self.assertEqual(vec.tolist(), [MockDict.BEG_IDX, 1])
    # now do it again with truncation=3, don't truncate_left
    vec = agent._vectorize_text(
        text, add_start=False, add_end=False, truncate=3, truncate_left=False
    )
    self.assertEqual(len(vec), 3)
    self.assertEqual(vec.tolist(), [1, 2, 3])
    vec = agent._vectorize_text(
        text, add_start=True, add_end=False, truncate=3, truncate_left=False
    )
    self.assertEqual(len(vec), 3)
    self.assertEqual(vec.tolist(), [MockDict.BEG_IDX, 1, 2])
    vec = agent._vectorize_text(
        text, add_start=False, add_end=True, truncate=3, truncate_left=False
    )
    self.assertEqual(len(vec), 3)
    self.assertEqual(vec.tolist(), [1, 2, 3])
    vec = agent._vectorize_text(
        text, add_start=True, add_end=True, truncate=3, truncate_left=False
    )
    self.assertEqual(len(vec), 3)
    self.assertEqual(vec.tolist(), [MockDict.BEG_IDX, 1, 2])
def test__check_truncate(self):
    """
    Make sure we are truncating when needed.

    A length limit of None means "no limit"; otherwise the vector is cut
    down to at most that many leading elements.
    """
    agent = get_agent()
    inp = torch.LongTensor([1, 2, 3])
    cases = [
        (None, [1, 2, 3]),
        (4, [1, 2, 3]),
        (3, [1, 2, 3]),
        (2, [1, 2]),
        (1, [1]),
        (0, []),
    ]
    for limit, expected in cases:
        self.assertEqual(agent._check_truncate(inp, limit).tolist(), expected)
def test_vectorize(self):
    """
    Test the vectorization of observations.

    Make sure they do not recompute results, and respect the different param
    options.  Runs the same checks for both 'labels' (train) and
    'eval_labels' (eval) observations.
    """
    agent = get_agent()
    obs_labs = Message(
        {'text': 'No. Try not.', 'labels': ['Do.', 'Do not.'], 'episode_done': True}
    )
    obs_elabs = Message(
        {
            'text': 'No. Try not.',
            'eval_labels': ['Do.', 'Do not.'],
            'episode_done': True,
        }
    )
    for obs in (obs_labs, obs_elabs):
        lab_key = 'labels' if 'labels' in obs else 'eval_labels'
        lab_vec = lab_key + '_vec'
        lab_chc = lab_key + '_choice'
        inp = obs.copy()
        # test add_start=True, add_end=True
        agent.history.reset()
        agent.history.update_history(inp)
        out = agent.vectorize(inp, agent.history, add_start=True, add_end=True)
        self.assertEqual(out['text_vec'].tolist(), [1, 2, 3])
        # note that label could be either label above
        self.assertEqual(out[lab_vec][0].item(), MockDict.BEG_IDX)
        self.assertEqual(out[lab_vec][1].item(), 1)
        self.assertEqual(out[lab_vec][-1].item(), MockDict.END_IDX)
        self.assertEqual(out[lab_chc][:2], 'Do')
        # test add_start=True, add_end=False
        inp = obs.copy()
        out = agent.vectorize(inp, agent.history, add_start=True, add_end=False)
        self.assertEqual(out['text_vec'].tolist(), [1, 2, 3])
        # note that label could be either label above
        self.assertEqual(out[lab_vec][0].item(), MockDict.BEG_IDX)
        self.assertNotEqual(out[lab_vec][-1].item(), MockDict.END_IDX)
        self.assertEqual(out[lab_chc][:2], 'Do')
        # test add_start=False, add_end=True
        inp = obs.copy()
        out = agent.vectorize(inp, agent.history, add_start=False, add_end=True)
        self.assertEqual(out['text_vec'].tolist(), [1, 2, 3])
        # note that label could be either label above
        self.assertNotEqual(out[lab_vec][0].item(), MockDict.BEG_IDX)
        self.assertEqual(out[lab_vec][-1].item(), MockDict.END_IDX)
        self.assertEqual(out[lab_chc][:2], 'Do')
        # test add_start=False, add_end=False
        inp = obs.copy()
        out = agent.vectorize(inp, agent.history, add_start=False, add_end=False)
        self.assertEqual(out['text_vec'].tolist(), [1, 2, 3])
        # note that label could be either label above
        self.assertNotEqual(out[lab_vec][0].item(), MockDict.BEG_IDX)
        self.assertNotEqual(out[lab_vec][-1].item(), MockDict.END_IDX)
        self.assertEqual(out[lab_chc][:2], 'Do')
        # test caching of tensors: a second vectorize of the same message
        # must return the identical tensor object, not a recomputed one
        out_again = agent.vectorize(out, agent.history)
        # should have cached result from before
        self.assertIs(out['text_vec'], out_again['text_vec'])
        self.assertEqual(out['text_vec'].tolist(), [1, 2, 3])
        # next: should truncate cached result (a new tensor is produced)
        prev_vec = out['text_vec']
        out_again = agent.vectorize(out, agent.history, text_truncate=1)
        self.assertIsNot(prev_vec, out_again['text_vec'])
        self.assertEqual(out['text_vec'].tolist(), [3])
    # test split_lines: each newline-separated line becomes its own
    # history vector
    agent = get_agent(split_lines=True)
    obs = Message(
        {
            'text': 'Hello.\nMy name is Inogo Montoya.\n'
            'You killed my father.\nPrepare to die.',
            'episode_done': True,
        }
    )
    agent.history.update_history(obs)
    vecs = agent.history.get_history_vec_list()
    self.assertEqual(vecs, [[1], [1, 2, 3, 4, 5], [1, 2, 3, 4], [1, 2, 3]])
    # check cache: re-vectorizing must not change the stored history
    out_again = agent.vectorize(obs, agent.history)
    vecs = agent.history.get_history_vec_list()
    self.assertEqual(vecs, [[1], [1, 2, 3, 4, 5], [1, 2, 3, 4], [1, 2, 3]])
def test_batchify(self):
    """
    Make sure the batchify function sets up the right fields.

    Checks both train ('labels') and eval ('eval_labels') batches, the
    effect of the agent's is_valid filter, sorting by length, and the
    label-candidate fields.
    """
    agent = get_agent(rank_candidates=True)
    obs_labs = [
        Message(
            {
                'text': 'It\'s only a flesh wound.',
                'labels': ['Yield!'],
                'episode_done': True,
            }
        ),
        Message(
            {
                'text': 'The needs of the many outweigh...',
                'labels': ['The needs of the few.'],
                'episode_done': True,
            }
        ),
        Message(
            {
                'text': 'Hello there.',
                'labels': ['General Kenobi.'],
                'episode_done': True,
            }
        ),
    ]
    obs_elabs = [
        Message(
            {
                'text': 'It\'s only a flesh wound.',
                'eval_labels': ['Yield!'],
                'episode_done': True,
            }
        ),
        Message(
            {
                'text': 'The needs of the many outweigh...',
                'eval_labels': ['The needs of the few.'],
                'episode_done': True,
            }
        ),
        Message(
            {
                'text': 'Hello there.',
                'eval_labels': ['General Kenobi.'],
                'episode_done': True,
            }
        ),
    ]
    for obs_batch in (obs_labs, obs_elabs):
        lab_key = 'labels' if 'labels' in obs_batch[0] else 'eval_labels'
        # nothing has been vectorized yet so should be empty
        batch = agent.batchify(obs_batch)
        self.assertIsNone(batch.text_vec)
        self.assertIsNone(batch.label_vec)
        self.assertIsNone(batch.labels)
        self.assertIsNone(batch.candidates)
        self.assertIsNone(batch.candidate_vecs)
        self.assertIsNone(batch.image)
        obs_vecs = []
        for o in obs_batch:
            agent.history.reset()
            agent.history.update_history(o)
            obs_vecs.append(
                agent.vectorize(o, agent.history, add_start=False, add_end=False)
            )

        # is_valid should map to nothing: every observation is filtered out
        def is_valid(obs):
            return False

        agent.is_valid = is_valid
        batch = agent.batchify(obs_batch)
        self.assertIsNone(batch.text_vec)
        self.assertIsNone(batch.label_vec)
        self.assertIsNone(batch.labels)
        self.assertIsNone(batch.candidates)
        self.assertIsNone(batch.candidate_vecs)
        self.assertIsNone(batch.image)

        # is_valid should check for text_vec
        def is_valid(obs):
            return 'text_vec' in obs

        agent.is_valid = is_valid
        batch = agent.batchify(obs_vecs)
        # which fields were filled vs should be empty?
        self.assertIsNotNone(batch.text_vec)
        self.assertIsNotNone(batch.label_vec)
        self.assertIsNotNone(batch.labels)
        self.assertIsNone(batch.candidates)
        self.assertIsNone(batch.candidate_vecs)
        self.assertIsNone(batch.image)
        # contents of certain fields (vectors are right-padded with 0):
        self.assertEqual(
            batch.text_vec.tolist(),
            [[1, 2, 3, 4, 5, 0], [1, 2, 3, 4, 5, 6], [1, 2, 0, 0, 0, 0]],
        )
        self.assertEqual(
            batch.label_vec.tolist(),
            [[1, 0, 0, 0, 0], [1, 2, 3, 4, 5], [1, 2, 0, 0, 0]],
        )
        self.assertEqual(batch.labels, [o[lab_key][0] for o in obs_batch])
        self.assertEqual(list(batch.valid_indices), [0, 1, 2])
        # now sort the batch (descending text length), make sure fields
        # are in sorted order
        batch = agent.batchify(obs_vecs, sort=True)
        self.assertEqual(
            batch.text_vec.tolist(),
            [[1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 0], [1, 2, 0, 0, 0, 0]],
        )
        self.assertEqual(
            batch.label_vec.tolist(),
            [[1, 2, 3, 4, 5], [1, 0, 0, 0, 0], [1, 2, 0, 0, 0]],
        )
        labs = [o[lab_key][0] for o in obs_batch]
        self.assertEqual(batch.labels, [labs[i] for i in [1, 0, 2]])
        self.assertEqual(list(batch.valid_indices), [1, 0, 2])
        # now sort just on ys (remove text so labels drive the sort)
        new_vecs = [vecs.copy() for vecs in obs_vecs]
        for vec in new_vecs:
            vec.pop('text')
            vec.pop('text_vec')

        def is_valid(obs):
            return 'labels_vec' in obs or 'eval_labels_vec' in obs

        agent.is_valid = is_valid
        batch = agent.batchify(new_vecs, sort=True)
        self.assertIsNone(batch.text_vec)
        self.assertIsNotNone(batch.label_vec)
        self.assertEqual(
            batch.label_vec.tolist(),
            [[1, 2, 3, 4, 5], [1, 2, 0, 0, 0], [1, 0, 0, 0, 0]],
        )
        labs = [o[lab_key][0] for o in new_vecs]
        self.assertEqual(batch.labels, [labs[i] for i in [1, 2, 0]])
        self.assertEqual(list(batch.valid_indices), [1, 2, 0])

        # test is_valid filtering to a strict subset (only short texts)
        def is_valid(obs):
            return 'text_vec' in obs and len(obs['text_vec']) < 3

        agent.is_valid = is_valid
        batch = agent.batchify(obs_vecs)
        self.assertEqual(batch.text_vec.tolist(), [[1, 2]])
        self.assertEqual(batch.label_vec.tolist(), [[1, 2]])
        self.assertEqual(batch.labels, obs_batch[2][lab_key])
        self.assertEqual(list(batch.valid_indices), [2])

    agent.history.reset()
    obs_cands = [
        agent.vectorize(
            Message({'label_candidates': ['A', 'B', 'C']}), agent.history
        ),
        agent.vectorize(
            Message({'label_candidates': ['1', '2', '5', '3', 'Sir']}),
            agent.history,
        ),
        agent.vectorize(
            Message({'label_candidates': ['Do', 'Re', 'Mi']}), agent.history
        ),
        agent.vectorize(
            Message({'label_candidates': ['Fa', 'So', 'La', 'Ti']}), agent.history
        ),
    ]

    # is_valid should check for label candidates vecs
    def is_valid(obs):
        return 'label_candidates_vecs' in obs

    agent.is_valid = is_valid
    batch = agent.batchify(obs_cands)
    self.assertTrue(agent.rank_candidates, 'Agent not set up to rank.')
    self.assertIsNone(batch.text_vec)
    self.assertIsNone(batch.label_vec)
    self.assertIsNone(batch.labels)
    self.assertIsNotNone(batch.valid_indices)
    self.assertIsNotNone(batch.candidates)
    self.assertIsNotNone(batch.candidate_vecs)
    self.assertEqual(list(batch.valid_indices), [0, 1, 2, 3])
    self.assertEqual(batch.candidates, [o['label_candidates'] for o in obs_cands])
    self.assertEqual(len(batch.candidate_vecs), len(obs_cands))
    for i, cs in enumerate(batch.candidate_vecs):
        self.assertEqual(len(cs), len(obs_cands[i]['label_candidates']))
def test_match_batch(self):
    """
    Make sure predictions are correctly aligned when available.

    match_batch maps batch outputs back onto the (possibly reordered)
    valid indices, so replies must land on their originating observation
    even when the batch was sorted out of order.
    """
    agent = get_agent()
    # first try empty outputs: replies should stay empty dicts
    reply = agent.match_batch([{}, {}, {}], [0, 1, 2], Output())
    self.assertEqual([{}, {}, {}], reply)
    reply = agent.match_batch([{}, {}, {}], [0, 1, 2], None)
    self.assertEqual([{}, {}, {}], reply)
    # try text in order
    reply = agent.match_batch(
        [{}, {}, {}], [0, 1, 2], Output(['E.T.', 'Phone', 'Home'])
    )
    self.assertEqual([{'text': 'E.T.'}, {'text': 'Phone'}, {'text': 'Home'}], reply)
    # try text out of order
    reply = agent.match_batch(
        [{}, {}, {}], [2, 0, 1], Output(['Home', 'E.T.', 'Phone'])
    )
    self.assertEqual([{'text': 'E.T.'}, {'text': 'Phone'}, {'text': 'Home'}], reply)
    # try text_candidates in order
    reply = agent.match_batch(
        [{}, {}],
        [0, 1],
        Output(
            None,
            [
                ['More human than human.', 'Less human than human'],
                ['Just walk into Mordor', 'Just QWOP into Mordor.'],
            ],
        ),
    )
    self.assertEqual(
        reply[0]['text_candidates'],
        ['More human than human.', 'Less human than human'],
    )
    self.assertEqual(
        reply[1]['text_candidates'],
        ['Just walk into Mordor', 'Just QWOP into Mordor.'],
    )
    # try text_candidates out of order
    reply = agent.match_batch(
        [{}, {}],
        [1, 0],
        Output(
            None,
            [
                ['More human than human.', 'Less human than human'],
                ['Just walk into Mordor', 'Just QWOP into Mordor.'],
            ],
        ),
    )
    self.assertEqual(
        reply[0]['text_candidates'],
        ['Just walk into Mordor', 'Just QWOP into Mordor.'],
    )
    self.assertEqual(
        reply[1]['text_candidates'],
        ['More human than human.', 'Less human than human'],
    )
    # try both text and text_candidates in order
    reply = agent.match_batch(
        [{}, {}],
        [0, 1],
        Output(
            ['You shall be avenged...', 'Man creates dinosaurs...'],
            [
                ['By Grabthar’s hammer.', 'By the suns of Worvan.'],
                ['Dinosaurs eat man.', 'Woman inherits the earth.'],
            ],
        ),
    )
    self.assertEqual(reply[0]['text'], 'You shall be avenged...')
    self.assertEqual(
        reply[0]['text_candidates'],
        ['By Grabthar’s hammer.', 'By the suns of Worvan.'],
    )
    self.assertEqual(reply[1]['text'], 'Man creates dinosaurs...')
    self.assertEqual(
        reply[1]['text_candidates'],
        ['Dinosaurs eat man.', 'Woman inherits the earth.'],
    )
    # try both text and text_candidates out of order
    reply = agent.match_batch(
        [{}, {}],
        [1, 0],
        Output(
            ['You shall be avenged...', 'Man creates dinosaurs...'],
            [
                ['By Grabthar’s hammer.', 'By the suns of Worvan.'],
                ['Dinosaurs eat man.', 'Woman inherits the earth.'],
            ],
        ),
    )
    self.assertEqual(reply[0]['text'], 'Man creates dinosaurs...')
    self.assertEqual(
        reply[0]['text_candidates'],
        ['Dinosaurs eat man.', 'Woman inherits the earth.'],
    )
    self.assertEqual(reply[1]['text'], 'You shall be avenged...')
    self.assertEqual(
        reply[1]['text_candidates'],
        ['By Grabthar’s hammer.', 'By the suns of Worvan.'],
    )
def test__add_person_tokens(self):
    """
    Make sure person tokens are added to the right place in text.
    """
    agent = get_agent()
    text = (
        "I've seen things you people wouldn't believe.\n"
        "Attack ships on fire off the shoulder of Orion.\n"
        "I watched C-beams glitter in the dark near the Tannhauser gate.\n"
        "All those moments will be lost in time, like tears in rain."
    )
    prefix = 'PRE'
    # default: prefix goes at the very start of the text
    out = agent.history._add_person_tokens(text, prefix, add_after_newln=False)
    self.assertEqual(out, prefix + ' ' + text)
    # add_after_newln: prefix goes after the LAST newline instead
    out = agent.history._add_person_tokens(text, prefix, add_after_newln=True)
    idx = text.rfind('\n') + 1
    self.assertEqual(out, text[:idx] + prefix + ' ' + text[idx:])
def test_history(self):
    """
    Test different dialog history settings.

    Covers: unlimited history, history_size 1/2/3, person tokens,
    add_p1_after_newln, history vectors, reset, custom delimiter,
    global end token, and temp history.
    """
    # try with unlimited history
    agent = get_agent(history_size=-1)
    obs = {'text': 'I am Groot.', 'labels': ['I am Groot?'], 'episode_done': False}
    # first exchange
    agent.history.update_history(obs)
    text = agent.history.get_history_str()
    self.assertEqual(text, 'I am Groot.')
    # second exchange, no reply
    agent.history.update_history(obs)
    text = agent.history.get_history_str()
    self.assertEqual(text, 'I am Groot.\nI am Groot.')
    # include reply
    agent.history.add_reply('I am Groot?')
    agent.history.update_history(obs)
    text = agent.history.get_history_str()
    self.assertEqual(text, 'I am Groot.\nI am Groot.\nI am Groot?\nI am Groot.')
    # on reset should be same as first exchange
    agent.history.reset()
    agent.history.update_history(obs)
    text = agent.history.get_history_str()
    self.assertEqual(text, 'I am Groot.')
    # now try with history size = 1: only the newest utterance is kept
    agent = get_agent(history_size=1)
    # first exchange
    agent.history.update_history(obs)
    text = agent.history.get_history_str()
    self.assertEqual(text, 'I am Groot.')
    # second exchange should change nothing
    agent.history.update_history(obs)
    text = agent.history.get_history_str()
    self.assertEqual(text, 'I am Groot.')
    # third exchange with reply should change nothing
    agent.history.update_history(obs)
    text = agent.history.get_history_str()
    self.assertEqual(text, 'I am Groot.')
    # now if we add the reply, we should only have the reply left
    agent.history.add_reply(obs['labels'][0])
    text = agent.history.get_history_str()
    self.assertEqual(text, 'I am Groot?')
    # now try with history size = 2
    agent = get_agent(history_size=2)
    # first exchange
    agent.history.update_history(obs)
    text = agent.history.get_history_str()
    self.assertEqual(text, 'I am Groot.')
    # second exchange with reply should contain reply
    agent.history.add_reply('I am Groot?')
    agent.history.update_history(obs)
    text = agent.history.get_history_str()
    self.assertEqual(text, 'I am Groot?\nI am Groot.')
    # now try with history size = 3
    agent = get_agent(history_size=3)
    # first exchange
    agent.history.update_history(obs)
    text = agent.history.get_history_str()
    self.assertEqual(text, 'I am Groot.')
    # second exchange with reply should contain reply and input
    agent.history.add_reply('I am Groot?')
    agent.history.update_history(obs)
    text = agent.history.get_history_str()
    self.assertEqual(text, 'I am Groot.\nI am Groot?\nI am Groot.')
    # now test add_person_tokens: each utterance is prefixed by speaker
    agent = get_agent(history_size=3, person_tokens=True)
    agent.history.update_history(obs)
    text = agent.history.get_history_str()
    self.assertEqual(text, '{} I am Groot.'.format(agent.P1_TOKEN))
    # second exchange, history should still contain the tokens
    agent.history.add_reply('I am Groot?')
    agent.history.update_history(obs)
    text = agent.history.get_history_str()
    self.assertEqual(
        text,
        '{} I am Groot.\n{} I am Groot?\n{} I am Groot.'.format(
            agent.P1_TOKEN, agent.P2_TOKEN, agent.P1_TOKEN
        ),
    )
    # now add add_p1_after_newln: P1 goes after the context newline
    agent = get_agent(history_size=3, person_tokens=True, add_p1_after_newln=True)
    ctx_obs = obs.copy()  # context then utterance in this text field
    ctx_obs['text'] = 'Groot is Groot.\nI am Groot.'
    agent.history.update_history(ctx_obs)
    text = agent.history.get_history_str()
    self.assertEqual(text, 'Groot is Groot.\n{} I am Groot.'.format(agent.P1_TOKEN))
    # second exchange, history should still contain context text
    agent.history.add_reply('I am Groot?')
    agent.history.update_history(obs)
    text = agent.history.get_history_str()
    self.assertEqual(
        text,
        'Groot is Groot.\n{} I am Groot.\n{} I am Groot?\n{} I am Groot.'.format(
            agent.P1_TOKEN, agent.P2_TOKEN, agent.P1_TOKEN
        ),
    )
    # test history vecs (2001 is the P1 person-token index)
    agent.history.reset()
    agent.history.update_history(obs)
    vec = agent.history.get_history_vec()
    self.assertEqual(vec, [2001, 1, 2, 3])
    # test history vec list
    agent.history.update_history(obs)
    vecs = agent.history.get_history_vec_list()
    self.assertEqual(vecs, [[2001, 1, 2, 3], [2001, 1, 2, 3]])
    # test clearing history
    agent.history.reset()
    text = agent.history.get_history_str()
    self.assertIsNone(text)
    vecs = agent.history.get_history_vec_list()
    self.assertEqual(vecs, [])
    # test delimiter
    agent = get_agent(history_size=-1, delimiter=' Groot! ')
    agent.history.update_history(obs)
    agent.history.update_history(obs)
    text = agent.history.get_history_str()
    self.assertEqual(text, 'I am Groot. Groot! I am Groot.')
    # test global_end_token, this will append a selected token to the end
    # of history block
    agent = get_agent(history_add_global_end_token='end')
    agent.history.reset()
    agent.history.update_history(obs)
    vec = agent.history.get_history_vec()
    self.assertEqual(vec, [1, 2, 3, MockDict.END_IDX])
    # test temp history: appended to strings/vecs but not persisted in
    # the per-turn vec list
    agent = get_agent(
        history_size=-1, include_temp_history=True, delimiter='__delim__'
    )
    agent.history.reset()
    agent.history.update_history(obs, temp_history=' temp history')
    text = agent.history.get_history_str()
    self.assertEqual(text, 'I am Groot. temp history')
    vec = agent.history.get_history_vec()
    self.assertEqual(vec, [1, 2, 3, 1, 2])
    agent.history.update_history(obs, temp_history=' temp history')
    text = agent.history.get_history_str()
    self.assertEqual(text, 'I am Groot.__delim__I am Groot. temp history')
    vecs = agent.history.get_history_vec_list()
    self.assertEqual(vecs, [[1, 2, 3], [1, 2, 3]])
    vec = agent.history.get_history_vec()
    self.assertEqual(vec, [1, 2, 3, 1, 1, 2, 3, 1, 2])
def test_reversed_history(self):
    """
    Make sure the history string stays in chronological order even with
    history_reversed=True, including when a persona/context prefix is
    present in the first message.
    """
    agent = get_agent(history_reversed=True)
    agent.history.reset()
    agent.history.update_history({'text': 'hello i am stephen'})
    agent.history.update_history({'text': 'i am bob'})
    assert agent.history.get_history_str() == 'hello i am stephen\ni am bob'
    agent.history.reset()
    agent.history.update_history(
        {'text': 'your persona: filler\nhello i am stephen'}
    )
    agent.history.update_history({'text': 'i am bob'})
    assert (
        agent.history.get_history_str()
        == 'your persona: filler\nhello i am stephen\ni am bob'
    )
def test_observe(self):
    """
    Make sure agent stores and returns observation.
    """
    agent = get_agent()
    # text could be none
    obs = {'text': None, 'episode_done': True}
    out = agent.observe(obs.copy())
    self.assertIsNotNone(out)
    # make sure we throw an exception for having an episode done without a reset
    obs = {'text': "I'll be back.", 'labels': ["I'm back."], 'episode_done': True}
    with self.assertRaises(RuntimeError):
        agent.observe(obs.copy())
    # okay, let's do it properly now
    agent.reset()
    obs = {'text': "I'll be back.", 'labels': ["I'm back."], 'episode_done': True}
    out = agent.observe(obs.copy())
    self.assertIsNotNone(out)
    self.assertIsNotNone(agent.observation)
    self.assertEqual(out['text'], "I'll be back.")
    # now try with episode not done
    agent = get_agent()
    obs['episode_done'] = False
    out = agent.observe(obs.copy())
    self.assertIsNotNone(out)
    self.assertIsNotNone(agent.observation)
    self.assertEqual(out['text'], "I'll be back.")
    # should remember history: full_text accumulates input, the agent's
    # own reply (the label), then the new input
    agent.act()
    out = agent.observe(obs.copy())
    self.assertEqual(out['full_text'], "I'll be back.\nI'm back.\nI'll be back.")
def test_batch_act(self):
    """
    Make sure batch act calls the right step.

    Observations with 'labels' must route to the training step (replies
    say "Training i!"); 'eval_labels' must route to evaluation.
    """
    agent = get_agent()
    obs_labs = [
        Message(
            {
                'text': "It's only a flesh wound.",
                'labels': ['Yield!'],
                'episode_done': True,
            }
        ),
        Message(
            {
                'text': 'The needs of the many outweigh...',
                'labels': ['The needs of the few.'],
                'episode_done': True,
            }
        ),
        Message(
            {
                'text': 'Hello there.',
                'labels': ['General Kenobi.'],
                'episode_done': True,
            }
        ),
    ]
    obs_labs_vecs = []
    for o in obs_labs:
        agent.history.reset()
        agent.history.update_history(o)
        obs_labs_vecs.append(agent.vectorize(o, agent.history))
    reply = agent.batch_act(obs_labs_vecs)
    for i in range(len(obs_labs_vecs)):
        self.assertEqual(reply[i]['text'], 'Training {}!'.format(i))
    obs_elabs = [
        Message(
            {
                'text': "It's only a flesh wound.",
                'eval_labels': ['Yield!'],
                'episode_done': True,
            }
        ),
        Message(
            {
                'text': 'The needs of the many outweigh...',
                'eval_labels': ['The needs of the few.'],
                'episode_done': True,
            }
        ),
        Message(
            {
                'text': 'Hello there.',
                'eval_labels': ['General Kenobi.'],
                'episode_done': True,
            }
        ),
    ]
    obs_elabs_vecs = []
    for o in obs_elabs:
        agent.history.reset()
        agent.history.update_history(o)
        obs_elabs_vecs.append(agent.vectorize(o, agent.history))
    reply = agent.batch_act(obs_elabs_vecs)
    for i in range(len(obs_elabs_vecs)):
        self.assertIn('Evaluating {}'.format(i), reply[i]['text'])
def test_interactive_mode(self):
    """
    Test if conversation history is destroyed in MTurk mode.

    Shared (cloned) agents must each answer a single act() correctly
    under every combination of batchsize and interactive_mode.
    """
    # both manually setting bs to 1 and interactive mode true
    agent = get_agent(batchsize=1, interactive_mode=True)
    agent.observe(Message({'text': 'foo', 'episode_done': True}))
    response = agent.act()
    self.assertIn(
        'Evaluating 0', response['text'], 'Incorrect output in single act()'
    )
    shared = create_agent_from_shared(agent.share())
    shared.observe(Message({'text': 'bar', 'episode_done': True}))
    response = shared.act()
    self.assertIn(
        'Evaluating 0', response['text'], 'Incorrect output in single act()'
    )
    # now just bs 1
    agent = get_agent(batchsize=1, interactive_mode=False)
    agent.observe(Message({'text': 'foo', 'episode_done': True}))
    response = agent.act()
    self.assertIn(
        'Evaluating 0', response['text'], 'Incorrect output in single act()'
    )
    shared = create_agent_from_shared(agent.share())
    shared.observe(Message({'text': 'bar', 'episode_done': True}))
    response = shared.act()
    self.assertIn(
        'Evaluating 0', response['text'], 'Incorrect output in single act()'
    )
    # now just interactive
    shared = create_agent_from_shared(agent.share())
    agent.observe(Message({'text': 'foo', 'episode_done': True}))
    response = agent.act()
    self.assertIn(
        'Evaluating 0', response['text'], 'Incorrect output in single act()'
    )
    shared = create_agent_from_shared(agent.share())
    shared.observe(Message({'text': 'bar', 'episode_done': True}))
    response = shared.act()
    self.assertIn(
        'Evaluating 0', response['text'], 'Incorrect output in single act()'
    )
    # finally, actively attempt to sabotage: big batchsize with a
    # non-interactive agent must still handle a single act()
    agent = get_agent(batchsize=16, interactive_mode=False)
    agent.observe(Message({'text': 'foo', 'episode_done': True}))
    response = agent.act()
    self.assertIn(
        'Evaluating 0', response['text'], 'Incorrect output in single act()'
    )
    shared = create_agent_from_shared(agent.share())
    shared.observe(Message({'text': 'bar', 'episode_done': True}))
    response = shared.act()
    self.assertIn(
        'Evaluating 0', response['text'], 'Incorrect output in single act()'
    )
def test_use_reply(self):
    """
    Check that self-observe is correctly acting on labels.
    """
    # default is hybrid label-model, which uses the label if it's available, and
    # otherwise the model's own reply
    # first check if there is a label available
    agent = get_agent()
    obs = Message({'text': 'Call', 'labels': ['Response'], 'episode_done': False})
    agent.observe(obs)
    _ = agent.act()
    self.assertEqual(agent.history.get_history_str(), 'Call\nResponse')
    # check if there is no label: history gets the model's output instead
    agent.reset()
    obs = Message({'text': 'Call', 'episode_done': False})
    agent.observe(obs)
    _ = agent.act()
    self.assertEqual(
        agent.history.get_history_str(), 'Call\nEvaluating 0 (responding to [[1]])!'
    )
    # now some of the other possible values of --use-reply
    # --use-reply model. even if there is a label, we should see the model's out
    agent = get_agent(use_reply='model')
    obs = Message({'text': 'Call', 'labels': ['Response'], 'episode_done': False})
    agent.observe(obs)
    _ = agent.act()
    self.assertEqual(agent.history.get_history_str(), 'Call\nTraining 0!')
    # --use-reply none doesn't hear itself
    agent = get_agent(use_reply='none')
    obs = Message({'text': 'Call', 'labels': ['Response'], 'episode_done': False})
    agent.observe(obs)
    agent.act()
    self.assertEqual(agent.history.get_history_str(), 'Call')
def test_mturk_racehistory(self):
    """
    Emulate a setting where batch_act misappropriately handles mturk.

    Two shared agents (standing in for two concurrent mturk conversation
    threads) interleave observe/act calls in differing orders; each
    thread's history must never contain messages from the other thread.
    """
    agent = get_agent(batchsize=16, interactive_mode=True, echo=True)
    share1 = create_agent_from_shared(agent.share())
    share1.observe(Message({'text': 'thread1-msg1', 'episode_done': False}))
    share2 = create_agent_from_shared(agent.share())
    share2.observe(Message({'text': 'thread2-msg1', 'episode_done': False}))
    share1.act()
    share2.act()
    share1.observe(Message({'text': 'thread1-msg2', 'episode_done': False}))
    share2.observe(Message({'text': 'thread2-msg2', 'episode_done': False}))
    # act in the opposite order this round to stress cross-thread state
    share2.act()
    share1.act()
    share2.observe(Message({'text': 'thread2-msg3', 'episode_done': False}))
    share1.observe(Message({'text': 'thread1-msg3', 'episode_done': False}))
    # neither thread's history may have leaked into the other
    self.assertNotIn('thread1-msg1', share2.history.get_history_str())
    self.assertNotIn('thread2-msg1', share1.history.get_history_str())
    self.assertNotIn('thread1-msg2', share2.history.get_history_str())
    self.assertNotIn('thread2-msg2', share1.history.get_history_str())
def test_resume_checkpoint(self):
    """
    Make sure when resuming training that model uses appropriate mf.

    Copy train_model from testing_utils to directly access agent.
    Trains once with --init-model set (a fine-tune), then resumes from
    the written checkpoint and verifies the resume is NOT treated as a
    fine-tune.
    """
    import parlai.scripts.train_model as tms

    def get_popt_and_tl(opt):
        # build a fully-parsed opt plus a TrainLoop from a raw opt dict;
        # raw values are copied back over the parsed ones afterwards
        parser = tms.setup_args()
        parser.set_params(**opt)
        popt = parser.parse_args([])
        for k, v in opt.items():
            popt[k] = v
        return popt, tms.TrainLoop(popt)

    def get_opt(init_mf, mf):
        # minimal training opt pointing at the given init/model files
        return {
            'task': 'integration_tests',
            'init_model': init_mf,
            'model': 'parlai.agents.test_agents.test_agents:MockTorchAgent',
            'model_file': mf,
            'num_epochs': 3,
            'validation_every_n_epochs': 1,
            'save_after_valid': True,
            'log_every_n_secs': 10,
        }

    with tempdir() as tmpdir:
        # First train model with init_model path set
        mf = os.path.join(tmpdir, 'model')
        init_mf = os.path.join(tmpdir, 'init_model')
        with open(init_mf, 'w') as f:
            # touch the file so the init model path exists on disk
            f.write(' ')
        opt = get_opt(init_mf, mf)
        popt, tl = get_popt_and_tl(opt)
        agent = tl.agent
        # init model file should be set appropriately
        init_model_file, is_finetune = agent._get_init_model(popt, None)
        self.assertEqual(init_model_file, init_mf)
        self.assertTrue(is_finetune)
        valid, test = tl.train()
        # now, train the model for another epoch
        opt = get_opt('{}.checkpoint'.format(mf), mf)
        opt['load_from_checkpoint'] = True
        popt, tl = get_popt_and_tl(opt)
        agent = tl.agent
        init_model_file, is_finetune = agent._get_init_model(popt, None)
        self.assertEqual(init_model_file, '{}.checkpoint'.format(mf))
        # resuming from a checkpoint must NOT count as fine-tuning
        self.assertFalse(is_finetune)
def test_truncate_metrics(self):
    """
    Check that context/label truncation metrics are recorded correctly.

    With truncate=5 both the context (9 tokens) and the label (11 tokens)
    exceed the limit, so both truncation flags should read 1.0 and the
    average-truncated counts should equal length minus the truncate cap.
    """
    agent = get_agent(model='test_agents/unigram', truncate=5)
    obs = {
        'text': "I'll be back. I'll be back. I'll be back.",
        'labels': ["I'll be back. I'll be back. I'll be back."],
        'episode_done': True,
    }
    obs = agent.observe(obs)
    agent.act()
    # both context and label were truncated
    self.assertEqual(agent._local_metrics['ctrunc'][0].value(), 1.0)
    self.assertEqual(agent._local_metrics['ltrunc'][0].value(), 1.0)
    # raw token lengths before truncation
    self.assertEqual(agent._local_metrics['clen'][0].value(), 9)
    self.assertEqual(agent._local_metrics['llen'][0].value(), 11)
    # tokens dropped: clen - truncate = 4, llen - truncate = 6
    self.assertEqual(
        agent._local_metrics['context_average_tokens_truncated'][0].value(), 4
    )
    self.assertEqual(
        agent._local_metrics['label_average_tokens_truncated'][0].value(), 6
    )
| 39.455061
| 88
| 0.559659
|
acff080a2eae589e43f2fabcf3724a2a327a6d45
| 694
|
py
|
Python
|
badges/views.py
|
Tudmotu/Open-Knesset
|
005adff8422ad34af8f78b0f32e7052b65a5bad3
|
[
"BSD-3-Clause"
] | 1
|
2018-12-11T01:43:25.000Z
|
2018-12-11T01:43:25.000Z
|
badges/views.py
|
Tudmotu/Open-Knesset
|
005adff8422ad34af8f78b0f32e7052b65a5bad3
|
[
"BSD-3-Clause"
] | null | null | null |
badges/views.py
|
Tudmotu/Open-Knesset
|
005adff8422ad34af8f78b0f32e7052b65a5bad3
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db.models import Count
from django.views.generic import DetailView, ListView
from models import Badge, BadgeType
class BadgeTypeDetailView(DetailView):
    """Detail page for one badge type, listing all badges awarded for it."""

    model = BadgeType
    template_name = 'badges/badge_detail.html'

    def get_context_data(self, *args, **kwargs):
        """Add the badge type's awarded badges, newest first, to the context."""
        context = super(BadgeTypeDetailView, self).get_context_data(*args, **kwargs)
        badge_type = context['object']
        context['badges'] = badge_type.badges.order_by('-created').all()
        return context
class BadgeTypeListView(ListView):
    """List page of all badge types, ordered by popularity."""

    # Each badge type annotated with the number of badges awarded for it,
    # most-awarded first.
    queryset = BadgeType.objects.all().annotate(
        amount=Count('badges')).order_by('-amount')
    template_name = 'badges/all_badge_list.html'
| 30.173913
| 79
| 0.688761
|
acff08d82c05b50a9e27cf3c490041bba9bb47dc
| 9,627
|
py
|
Python
|
txtools/normalizer.py
|
fdelgados/Textools
|
d7721a35961765a35cd45e115ff24bbd7eef58e4
|
[
"MIT"
] | 1
|
2020-06-13T01:39:44.000Z
|
2020-06-13T01:39:44.000Z
|
txtools/normalizer.py
|
fdelgados/Textools
|
d7721a35961765a35cd45e115ff24bbd7eef58e4
|
[
"MIT"
] | null | null | null |
txtools/normalizer.py
|
fdelgados/Textools
|
d7721a35961765a35cd45e115ff24bbd7eef58e4
|
[
"MIT"
] | null | null | null |
import sys
import os
import re
import html
import unicodedata
import string
import binascii
from typing import List, Tuple
import nltk
from contextlib import redirect_stdout
from nltk.corpus import stopwords, wordnet
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize import RegexpTokenizer
from sklearn.base import BaseEstimator, TransformerMixin
with redirect_stdout(open(os.devnull, "w")):
nltk.download('punkt', quiet=True)
nltk.download('stopwords', quiet=True)
nltk.download('averaged_perceptron_tagger', quiet=True)
nltk.download('wordnet', quiet=True)
class TextNormalizer(BaseEstimator, TransformerMixin):
    """Scikit-learn compatible transformer that cleans text and reduces its
    tokens by stemming (default) or lemmatization, dropping punctuation and
    stop words along the way.
    """

    # Mapping from supported ISO 639-1 codes to the names nltk expects.
    ALLOWED_LANGUAGES = {'en': 'english',
                         'es': 'spanish',
                         'fr': 'french',
                         'de': 'german',
                         'it': 'italian',
                         'ca': 'catalan',
                         'pt': 'portuguese'}
    DEFAULT_LANG_CODE = 'en'
    LEMMATIZATION = 'lemmatization'
    STEMMING = 'stemming'
    DEFAULT_K_SHINGLES = 5
    DEFAULT_NORMALIZATION_METHOD = 'stemming'

    def __init__(self, method: str = None, lang_code: str = None):
        """TextNormalizer constructor

        :param method: Normalization method: stemming or lemmatization
            (defaults to DEFAULT_NORMALIZATION_METHOD when None)
        :param lang_code: ISO 639-1 code language
            (defaults to DEFAULT_LANG_CODE when None)
        :raises ValueError: if method or lang_code is not supported
        """
        # BUG FIX: the previous version raised ValueError whenever
        # method was None, which is the documented default — so
        # TextNormalizer() could never be constructed.  Validation is
        # delegated to __get_normalization_method__, which accepts None.
        # BUG FIX: lang_code=None now falls back to DEFAULT_LANG_CODE
        # (previously the default always raised ValueError too).
        if lang_code is None:
            lang_code = self.DEFAULT_LANG_CODE
        self.language = self.__get_language__(lang_code)
        self.normalization_method = self.__get_normalization_method__(method)
        self.lemmatizer = WordNetLemmatizer()
        self.stop_words = stopwords.words(self.language)

    def __get_language__(self, lang_code: str) -> str:
        """Return the language name from language code

        :param lang_code: ISO 639-1 code language
        :return: Language name
        :raises: ValueError
        """
        if lang_code not in self.ALLOWED_LANGUAGES.keys():
            raise ValueError('{} is not a supported language code'.format(lang_code))
        return self.ALLOWED_LANGUAGES[lang_code]

    def __get_normalization_method__(self, normalization_method: str = None) -> str:
        """Returns the normalization method

        :param normalization_method: Normalization method to check
            (None selects the default)
        :return: Normalization method
        :raises: ValueError
        """
        if not normalization_method:
            return self.DEFAULT_NORMALIZATION_METHOD
        if normalization_method != self.LEMMATIZATION and normalization_method != self.STEMMING:
            raise ValueError('Invalid normalization method')
        return normalization_method

    @staticmethod
    def is_punct(token: str) -> bool:
        """ Checks if all chars in token are punctuation symbols

        :param token: Token
        :return: True if all chars in token are punctuation symbols, False otherwise
        """
        return all(unicodedata.category(char).startswith('P') for char in token)

    def is_stopword(self, token: str) -> bool:
        """ Checks if token is a stop word

        :param token: Token
        :return: True if token is a stop word, False otherwise
        """
        return token.lower() in self.stop_words

    def normalize(self, text: str) -> str:
        """ Normalize text

        :param text: Text to be normalized
        :return: Normalized text
        """
        text = clean_text(text)
        if self.normalization_method == self.LEMMATIZATION:
            return self.normalize_with_lemmatization(text)
        elif self.normalization_method == self.STEMMING:
            return self.normalize_with_stemming(text)

    def normalize_with_lemmatization(self, text: str) -> str:
        """Return normalized text by lemmatization method

        :param text: Text to normalize
        :return: Normalized text
        """
        sentences = self.sentence_tokenize(text)
        normalized_tokens = [
            self.lemmatize(token, tag).lower()
            for sentence in sentences
            for (token, tag) in sentence
            if not TextNormalizer.is_punct(token) and not self.is_stopword(token)
        ]
        # BUG FIX: normalized_tokens is a flat list of strings; the old
        # nested join iterated each token character-by-character and
        # inserted a space between every letter.
        return ' '.join(normalized_tokens)

    def sentence_tokenize(self, text: str) -> List[List[Tuple[str, str]]]:
        """ Splits text into sentences, each a list of (token, POS-tag) tuples

        :param text: Text to be tokenized
        :return: One list per sentence of (token, part-of-speech tag) tuples
        """
        return [
            nltk.pos_tag(nltk.wordpunct_tokenize(sentence))
            for sentence in nltk.sent_tokenize(text, language=self.language)
        ]

    def create_shingles(self, text: str, unique: bool = True, k_shingles: int = None) -> List[int]:
        """Hash the text's character k-shingles with crc32.

        :param text: Source text (lowercased/stripped before shingling)
        :param unique: Drop duplicate shingles first
            (note: set order is arbitrary, so the result order is too)
        :param k_shingles: Shingle length (defaults to DEFAULT_K_SHINGLES)
        :return: List of 32-bit crc32 hashes, one per shingle
        """
        if not k_shingles:
            k_shingles = self.DEFAULT_K_SHINGLES
        text = text.strip().lower()
        # BUG FIX: off-by-one — the former range dropped the final shingle
        # (a k-char text produced no shingles at all).
        shingles = [text[head:head + k_shingles] for head in range(0, len(text) - k_shingles + 1)]
        if unique:
            shingles = set(shingles)
        return [binascii.crc32(shingle.encode('utf8')) & 0xffffffff for shingle in shingles]

    def normalize_with_stemming(self, text: str) -> str:
        """Normalize text by stemming method

        :param text: Text to normalize
        :return: Normalized text
        """
        tokenizer = RegexpTokenizer(r'\w+')
        stemmer = SnowballStemmer(self.language)
        tokens = tokenizer.tokenize(text.lower())
        tokens = [token for token in tokens
                  if not self.is_stopword(token) and not TextNormalizer.is_punct(token)]
        tokens_stemmed = [stemmer.stem(token) for token in tokens]
        return ' '.join(tokens_stemmed)

    def lemmatize(self, token: str, pos_tag: str) -> str:
        """ Lemmatize token

        :param token: Token
        :param pos_tag: Part-of-speech tag (Penn Treebank style; the first
            letter selects the wordnet POS, defaulting to noun)
        :return: Lemmatized word
        """
        tag = {
            'N': wordnet.NOUN,
            'V': wordnet.VERB,
            'R': wordnet.ADV,
            'J': wordnet.ADJ
        }.get(pos_tag[0], wordnet.NOUN)
        return self.lemmatizer.lemmatize(token, tag)

    def fit(self, X, y=None):
        """No-op fit: the transformer is stateless (sklearn API compliance)."""
        return self

    def transform(self, documents):
        """Normalize each document in the iterable."""
        return [self.normalize(doc) for doc in documents]
def clean_text(text: str, cleaners: List[str] = None, exclude: List[str] = None) -> str:
    """ Removes unwanted chars from text

    :param text: Text to be cleaned
    :param cleaners: List of cleaners to be applied to text (defaults to all)
    :param exclude: List of cleaners that wont be applied
    :return: Clean text
    """
    if not cleaners:
        # BUG FIX: a missing comma previously fused 'new_line' and
        # 'extra_quotation' into the single bogus name
        # 'new_lineextra_quotation', silently skipping both cleaners.
        cleaners = ['html_tags', 'html_entities', 'unicode_nbsp', 'tabs', 'new_line',
                    'extra_quotation', 'non_ascii', 'extra_whitespaces', 'urls', 'punctuation']
    if exclude:
        cleaners = [cleaner for cleaner in cleaners if cleaner not in exclude]
    for cleaner in cleaners:
        cleaner_func_name = 'remove_{}'.format(cleaner)
        try:
            # look up the module-level remove_* function for this cleaner
            cleaner_function = getattr(sys.modules[__name__], cleaner_func_name)
        except AttributeError:
            # unknown cleaner names are silently ignored
            continue
        text = cleaner_function(text)
    return text
def remove_html_tags(text: str) -> str:
    """Strip anything that looks like an html/xml tag from *text*.

    :param text: Text to be cleaned
    :return: Text with all ``<...>`` spans removed
    """
    # non-greedy so each tag is matched individually
    return re.sub(r'<.*?>', '', text)
def remove_extra_whitespaces(text: str) -> str:
    """Collapse runs of space characters into a single space.

    Only ASCII spaces are affected; tabs/newlines are left untouched.

    :param text: Text to be cleaned
    :return: Clean text
    """
    multi_space = re.compile(r' +')
    return multi_space.sub(' ', text)
def remove_extra_quotation(text: str) -> str:
    """Collapse repeated quotation marks (double first, then single).

    :param text: Text to be cleaned
    :return: Clean text
    """
    return re.sub(r'\'{2,}', "'", re.sub(r'\"{2,}', '"', text))
def remove_new_line(text: str) -> str:
    """Replace newline and carriage-return characters with spaces.

    :param text: Text to be cleaned
    :return: Clean text
    """
    # BUG FIX: str.maketrans requires both mapping strings to have equal
    # length; '\n\r' (2 chars) mapped to a single space raised ValueError,
    # so this cleaner crashed whenever it was actually invoked.
    return text.translate(str.maketrans('\n\r', '  '))
def remove_tabs(text: str) -> str:
    """Turn every tab character into a single space.

    :param text: Text to be cleaned
    :return: Clean text
    """
    return text.translate(str.maketrans('\t', ' '))
def remove_unicode_nbsp(text: str) -> str:
    """Replace non-breaking spaces (U+00A0) with regular spaces.

    :param text: Text to be cleaned
    :return: Clean text
    """
    nbsp_table = {ord(u'\xa0'): u' '}
    return text.translate(nbsp_table)
def remove_html_entities(text: str) -> str:
    """ Converts html entities in the corresponding unicode string
    (e.g. '&amp;amp;' becomes '&amp;', numeric references are decoded too)

    :param text: Text to be cleaned
    :return: Clean text
    """
    return html.unescape(text)
def remove_non_ascii(text: str) -> str:
    """Drop every character outside the 7-bit ASCII range.

    :param text: Text to be cleaned
    :return: Clean text
    """
    # encode/ignore discards exactly the code points >= 128
    return text.encode('ascii', 'ignore').decode('ascii')
def remove_urls(text):
    """ Removes all urls from text

    :param text: The string being searched and replaced on
    :return: Text without the urls
    """
    url_regex = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    # replace each matched url everywhere it occurs in the text
    for match in re.findall(url_regex, text):
        text = text.replace(match, '')
    return text
def remove_punctuation(text: str) -> str:
    """ Removes punctuation from text

    ASCII punctuation plus the Spanish inverted marks is stripped from
    each whitespace-separated word; words are re-joined with single spaces.

    :param text: The string being searched and replaced on
    :return: Text without the punctuation characters
    """
    table = str.maketrans('', '', string.punctuation + '¿¡')
    return ' '.join(word.translate(table) for word in text.split())
| 31.772277
| 99
| 0.631349
|
acff09226a0f6f8c1f3a7ea318da1fc33082b932
| 11,524
|
py
|
Python
|
layers/graphsage_layer.py
|
Janusz478/gnn_ce
|
91fc9610557772f0179cbd47ac13d3213a37bdfe
|
[
"MIT"
] | 2
|
2021-12-16T14:23:43.000Z
|
2022-03-01T14:36:45.000Z
|
layers/graphsage_layer.py
|
MrRyschkov/LGP-GNN
|
3eb7a54016423abad30c1e30bf4df96fdd0851b4
|
[
"MIT"
] | null | null | null |
layers/graphsage_layer.py
|
MrRyschkov/LGP-GNN
|
3eb7a54016423abad30c1e30bf4df96fdd0851b4
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
from dgl.nn.pytorch import SAGEConv
"""
GraphSAGE:
William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
"""
class GraphSageLayer(nn.Module):
    """Single GraphSAGE layer: aggregates neighbour features and combines
    them with each node's own features (Hamilton et al., NeurIPS 2017).

    Supports 'maxpool', 'lstm' and (fallback) mean aggregation, either via
    the hand-written aggregators below or DGL's builtin SAGEConv when
    ``dgl_builtin=True``.
    """

    def __init__(self, in_feats, out_feats, activation, dropout,
                 aggregator_type, batch_norm, residual=False,
                 bias=True, dgl_builtin=False):
        super().__init__()
        self.in_channels = in_feats
        self.out_channels = out_feats
        self.aggregator_type = aggregator_type
        self.batch_norm = batch_norm
        self.residual = residual
        self.dgl_builtin = dgl_builtin

        if in_feats != out_feats:
            # residual addition requires matching dimensions
            self.residual = False

        self.dropout = nn.Dropout(p=dropout)

        if dgl_builtin == False:
            self.nodeapply = NodeApply(in_feats, out_feats, activation, dropout,
                                       bias=bias)
            if aggregator_type == "maxpool":
                self.aggregator = MaxPoolAggregator(in_feats, in_feats,
                                                    activation, bias)
            elif aggregator_type == "lstm":
                self.aggregator = LSTMAggregator(in_feats, in_feats)
            else:
                # any other aggregator string falls back to mean aggregation
                self.aggregator = MeanAggregator()
        else:
            self.sageconv = SAGEConv(in_feats, out_feats, aggregator_type,
                                     dropout, activation=activation)

        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_feats)

    def forward(self, g, h):
        h_in = h  # for residual connection
        if self.dgl_builtin == False:
            h = self.dropout(h)
            g.ndata['h'] = h
            #g.update_all(fn.copy_src(src='h', out='m'),
            #             self.aggregator,
            #             self.nodeapply)
            # maxpool/mean use DGL builtin reduce functions; the lstm path
            # needs the Python aggregator since its reduction is not builtin
            if self.aggregator_type == 'maxpool':
                # apply the pooling projection + activation to all nodes
                # up-front, then max-reduce the copied messages
                g.ndata['h'] = self.aggregator.linear(g.ndata['h'])
                g.ndata['h'] = self.aggregator.activation(g.ndata['h'])
                g.update_all(fn.copy_src('h', 'm'), fn.max('m', 'c'), self.nodeapply)
            elif self.aggregator_type == 'lstm':
                g.update_all(fn.copy_src(src='h', out='m'),
                             self.aggregator,
                             self.nodeapply)
            else:
                g.update_all(fn.copy_src('h', 'm'), fn.mean('m', 'c'), self.nodeapply)
            h = g.ndata['h']
        else:
            h = self.sageconv(g, h)

        if self.batch_norm:
            h = self.batchnorm_h(h)

        if self.residual:
            h = h_in + h  # residual connection

        return h

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, aggregator={}, residual={})'.format(self.__class__.__name__,
                                                                                        self.in_channels,
                                                                                        self.out_channels, self.aggregator_type, self.residual)
"""
Aggregators for GraphSage
"""
class Aggregator(nn.Module):
    """
    Base Aggregator class.

    Subclasses implement ``aggre`` to reduce the stacked neighbour
    messages (read from the node mailbox under key 'm') to one vector
    per node, returned under key 'c'.
    """

    def __init__(self):
        super().__init__()

    def forward(self, node):
        messages = node.mailbox['m']
        return {"c": self.aggre(messages)}

    def aggre(self, neighbour):
        # N x F
        raise NotImplementedError
class MeanAggregator(Aggregator):
    """
    Mean Aggregator for graphsage: averages the neighbour features.
    """

    def __init__(self):
        super().__init__()

    def aggre(self, neighbour):
        # average over the neighbour dimension (dim 1)
        return torch.mean(neighbour, dim=1)
class MaxPoolAggregator(Aggregator):
    """
    Maxpooling aggregator for graphsage: projects neighbour features
    through a linear layer (plus optional activation), then takes the
    element-wise maximum over the neighbours.
    """

    def __init__(self, in_feats, out_feats, activation, bias):
        super().__init__()
        self.linear = nn.Linear(in_feats, out_feats, bias=bias)
        self.activation = activation

    def aggre(self, neighbour):
        projected = self.linear(neighbour)
        if self.activation:
            projected = self.activation(projected)
        # torch.max returns (values, indices); keep only the values
        return torch.max(projected, dim=1)[0]
class LSTMAggregator(Aggregator):
    """
    LSTM aggregator for graphsage: feeds the (randomly permuted)
    neighbour sequence through an LSTM and keeps the last output.
    """

    def __init__(self, in_feats, hidden_feats):
        super().__init__()
        self.lstm = nn.LSTM(in_feats, hidden_feats, batch_first=True)
        self.hidden_dim = hidden_feats
        self.hidden = self.init_hidden()

        # BUG FIX: nn.LSTM has no ``.weight`` attribute (its parameters are
        # named weight_ih_l0 / weight_hh_l0, bias_ih_l0, ...), so the old
        # ``nn.init.xavier_uniform_(self.lstm.weight, ...)`` raised
        # AttributeError on construction.  Initialize both weight matrices.
        for name, param in self.lstm.named_parameters():
            if 'weight' in name:
                nn.init.xavier_uniform_(param,
                                        gain=nn.init.calculate_gain('relu'))

    def init_hidden(self):
        """
        Defaulted to initialize all zero
        """
        return (torch.zeros(1, 1, self.hidden_dim),
                torch.zeros(1, 1, self.hidden_dim))

    def aggre(self, neighbours):
        """
        aggregation function

        The neighbour order is shuffled first so the LSTM does not learn
        an (arbitrary) neighbour ordering; note this makes the output
        non-deterministic unless the torch RNG is seeded.
        """
        # N X F
        rand_order = torch.randperm(neighbours.size()[1])
        neighbours = neighbours[:, rand_order, :]
        (lstm_out, self.hidden) = self.lstm(neighbours.view(neighbours.size()[0], neighbours.size()[1], -1))
        # last time-step output is the aggregated representation
        return lstm_out[:, -1, :]

    def forward(self, node):
        neighbour = node.mailbox['m']
        c = self.aggre(neighbour)
        return {"c": c}
class NodeApply(nn.Module):
    """
    Works -> the node_apply function in DGL paradigm: concatenates each
    node's own features with its aggregated neighbourhood, projects the
    result, L2-normalizes it and applies the optional activation.
    """

    def __init__(self, in_feats, out_feats, activation, dropout, bias=True):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.linear = nn.Linear(in_feats * 2, out_feats, bias)
        self.activation = activation

    def concat(self, h, aggre_result):
        # concatenate self features with aggregated neighbourhood (dim 1)
        # and project down to out_feats
        return self.linear(torch.cat((h, aggre_result), 1))

    def forward(self, node):
        bundle = self.concat(node.data['h'], node.data['c'])
        bundle = F.normalize(bundle, p=2, dim=1)
        if self.activation:
            bundle = self.activation(bundle)
        return {"h": bundle}
##############################################################
#
# Additional layers for edge feature/representation analysis
#
##############################################################
class GraphSageLayerEdgeFeat(nn.Module):
    """GraphSAGE layer with anisotropic (edge-gated) max-pool aggregation.

    Edge gates e_ij = Bh_i + Bh_j are computed on the fly from node
    features (no edge input) and modulate the neighbour messages through
    a sigmoid before the max-pooling reduction.
    """

    def __init__(self, in_feats, out_feats, activation, dropout,
                 aggregator_type, batch_norm, residual=False,
                 bias=True, dgl_builtin=False):
        super().__init__()
        self.in_channels = in_feats
        self.out_channels = out_feats
        self.batch_norm = batch_norm
        self.residual = residual

        if in_feats != out_feats:
            # residual addition requires matching dimensions
            self.residual = False

        self.dropout = nn.Dropout(p=dropout)
        self.activation = activation

        # A projects the neighbour message, B builds the edge gate
        self.A = nn.Linear(in_feats, out_feats, bias=bias)
        self.B = nn.Linear(in_feats, out_feats, bias=bias)

        self.nodeapply = NodeApply(in_feats, out_feats, activation, dropout, bias=bias)

        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_feats)

    def message_func(self, edges):
        Ah_j = edges.src['Ah']
        e_ij = edges.src['Bh'] + edges.dst['Bh']  # e_ij = Bhi + Bhj
        edges.data['e'] = e_ij
        return {'Ah_j' : Ah_j, 'e_ij' : e_ij}

    def reduce_func(self, nodes):
        # Anisotropic MaxPool aggregation: gate each message with
        # sigmoid(e_ij), then take the element-wise max over neighbours
        Ah_j = nodes.mailbox['Ah_j']
        e = nodes.mailbox['e_ij']
        sigma_ij = torch.sigmoid(e)  # sigma_ij = sigmoid(e_ij)
        Ah_j = sigma_ij * Ah_j
        if self.activation:
            Ah_j = self.activation(Ah_j)
        c = torch.max(Ah_j, dim=1)[0]
        return {'c' : c}

    def forward(self, g, h):
        h_in = h  # for residual connection
        h = self.dropout(h)

        g.ndata['h']  = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.update_all(self.message_func,
                     self.reduce_func,
                     self.nodeapply)
        h = g.ndata['h']

        if self.batch_norm:
            h = self.batchnorm_h(h)

        if self.residual:
            h = h_in + h  # residual connection

        return h

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, residual={})'.format(
            self.__class__.__name__,
            self.in_channels,
            self.out_channels,
            self.residual)
##############################################################
class GraphSageLayerEdgeReprFeat(nn.Module):
    """GraphSAGE layer that maintains explicit edge representations.

    Like GraphSageLayerEdgeFeat, but edges carry their own features: the
    gate is e_ij = Ce_ij + Bh_i + Bh_j, and forward returns updated node
    AND edge representations.
    """

    def __init__(self, in_feats, out_feats, activation, dropout,
                 aggregator_type, batch_norm, residual=False,
                 bias=True, dgl_builtin=False):
        super().__init__()
        self.in_channels = in_feats
        self.out_channels = out_feats
        self.batch_norm = batch_norm
        self.residual = residual

        if in_feats != out_feats:
            # residual addition requires matching dimensions
            self.residual = False

        self.dropout = nn.Dropout(p=dropout)
        self.activation = activation

        # A projects neighbour messages, B gates via node features,
        # C gates via the incoming edge features
        self.A = nn.Linear(in_feats, out_feats, bias=bias)
        self.B = nn.Linear(in_feats, out_feats, bias=bias)
        self.C = nn.Linear(in_feats, out_feats, bias=bias)

        self.nodeapply = NodeApply(in_feats, out_feats, activation, dropout, bias=bias)

        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_feats)
            self.batchnorm_e = nn.BatchNorm1d(out_feats)

    def message_func(self, edges):
        Ah_j = edges.src['Ah']
        e_ij = edges.data['Ce'] + edges.src['Bh'] + edges.dst['Bh']  # e_ij = Ce_ij + Bhi + Bhj
        edges.data['e'] = e_ij
        return {'Ah_j' : Ah_j, 'e_ij' : e_ij}

    def reduce_func(self, nodes):
        # Anisotropic MaxPool aggregation: gate each message with
        # sigmoid(e_ij), then take the element-wise max over neighbours
        Ah_j = nodes.mailbox['Ah_j']
        e = nodes.mailbox['e_ij']
        sigma_ij = torch.sigmoid(e)  # sigma_ij = sigmoid(e_ij)
        Ah_j = sigma_ij * Ah_j
        if self.activation:
            Ah_j = self.activation(Ah_j)
        c = torch.max(Ah_j, dim=1)[0]
        return {'c' : c}

    def forward(self, g, h, e):
        h_in = h  # for residual connection
        e_in = e
        h = self.dropout(h)

        g.ndata['h']  = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.edata['e']  = e
        g.edata['Ce'] = self.C(e)
        g.update_all(self.message_func,
                     self.reduce_func,
                     self.nodeapply)
        h = g.ndata['h']
        e = g.edata['e']

        if self.activation:
            e = self.activation(e)  # non-linear activation

        if self.batch_norm:
            h = self.batchnorm_h(h)
            e = self.batchnorm_e(e)

        if self.residual:
            h = h_in + h  # residual connection
            e = e_in + e  # residual connection

        return h, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, residual={})'.format(
            self.__class__.__name__,
            self.in_channels,
            self.out_channels,
            self.residual)
| 31.061995
| 114
| 0.535925
|
acff098355cac8e6ac018ba448df54201324c2a8
| 20,587
|
py
|
Python
|
plotTableScripts/jsonPlot.py
|
rauterRaphael/comparisonNILMOutlierDetection
|
c38a126dffa0b7493a630461121e05ee36a12eb4
|
[
"MIT"
] | null | null | null |
plotTableScripts/jsonPlot.py
|
rauterRaphael/comparisonNILMOutlierDetection
|
c38a126dffa0b7493a630461121e05ee36a12eb4
|
[
"MIT"
] | null | null | null |
plotTableScripts/jsonPlot.py
|
rauterRaphael/comparisonNILMOutlierDetection
|
c38a126dffa0b7493a630461121e05ee36a12eb4
|
[
"MIT"
] | null | null | null |
import os
import sys
import csv
import json
import numpy as np
import copy
import glob
import shutil
import collections
import matplotlib.pyplot as plt
### ------------------------------------------------------------- ###
def getJsonFile():
    """Ask the user for a simulation-result json file and return its path.

    If a ``simuResults`` directory exists, its json files are listed and
    the user picks one by number (up to 3 attempts, then ``None`` is
    returned); otherwise the user is prompted for a path directly.

    :return: Selected file path, or None after three invalid choices
    """
    if not os.path.exists("simuResults"):
        return input("Path to json file: ")
    # NOTE: the former os.listdir() call was dead code — its result was
    # immediately overwritten by glob.glob().
    dirContent = glob.glob("simuResults/*.json")
    dirContent.sort()
    for i, path in enumerate(dirContent, start=1):
        # show only the file name, not the directory prefix
        print(str(i) + " - " + str(path).split("/")[1])
    for _ in range(3):  # give the user three attempts
        fileNum = int(input("File num: ")) - 1
        if 0 <= fileNum < len(dirContent):
            return dirContent[fileNum]
        print("Computer sagt nein.")
    return None
def loadJsonContent(jsonFile):
    """Parse a json-lines file (one json document per line) into a list."""
    with open(jsonFile, 'r') as handle:
        return [json.loads(line) for line in handle]
### ------------------------------------------------------------- ###
def getMaxBuildingNum(fileContent):
    """Return the highest building number among all result lines.

    Each line is ``[settings, metrics]`` with the building number at
    ``settings["application"][0]``.  Returns 0 when the input is empty
    (or when no building number exceeds 0), matching the original
    zero-initialised scan.
    """
    building_nums = [line[0]["application"][0] for line in fileContent]
    return max([0] + building_nums)
def getAllApplications(fileContent):
    """Collect the distinct applications from the result lines.

    :param fileContent: list of ``[settings, metrics]`` result lines
    :return: tuple ``(applicationsAll, applicationsMerged)`` — the full
        ``[building, name]`` pairs and just the names, both deduplicated
        in first-seen order
    """
    applicationsAll = []
    applicationsMerged = []
    for line in fileContent:
        application = line[0]["application"]
        if application not in applicationsAll:
            applicationsAll.append(application)
        name = application[1]
        if name not in applicationsMerged:
            applicationsMerged.append(name)
    return applicationsAll, applicationsMerged
### ------------------------------------------------------------- ###
def storeResults(fileName, outlier):
    """Append ``outlier`` as an indented json document (plus newline) to
    ``fileName``.

    Mode 'a' creates the file when it does not exist, so the former
    explicit existence check was unnecessary — behavior is identical.
    """
    with open(fileName, 'a') as outfile:
        json.dump(outlier, outfile, indent=1)
        outfile.write("\n")
def getOutPerAppl(fileContent, applications):
    """Accumulate per-application outlier metrics across all k-fold lines.

    For every filter type (None, rollingMedian, hampel) and every
    application, sums the absolute counts and averages the relative rates
    over ``kFoldSplits`` folds for both detectors ('if' and 'lof'), then
    attaches the merged per-appliance data from getOutlierPerAppl under
    'calc'.

    NOTE(review): the aggregation assumes every matching metrics dict has
    both 'if' and 'lof' keys and that at least one line matches each
    appl/filter pair — otherwise the final ``applData["if"]`` /
    ``applData["lof"]`` accesses raise KeyError.  Confirm the producer
    guarantees this.
    """
    outPerAppl = []
    applData = {}
    kFoldSplits = 5  # relative rates below are averaged over this many folds
    for filterType in [None, "rollingMedian", "hampel"]:
        for appl in applications:
            applData["application"] = appl
            applData["filter"] = filterType
            applData["settings"] = {}
            index = 0
            for idx, line in enumerate(fileContent):
                # each result line is [settings, metrics]
                settings = line[0]
                metrics = line[1]
                if metrics:
                    if settings["application"] == appl and applData["filter"] == settings["filterData"]:
                        applData["settings"] = settings
                        if metrics["if"]:
                            if "if" not in applData:
                                # first matching fold: initialise counters
                                applData["if"] = {}
                                applData["if"]["absApplOut"] = 0
                                applData["if"]["relApplOut"] = 0
                                applData["if"]["absPowOut"] = 0
                                applData["if"]["relPowOut"] = 0
                            applData["if"]["absApplOut"] += metrics["if"]["absApplOut"]
                            applData["if"]["relApplOut"] += metrics["if"]["relApplOut"]
                            applData["if"]["absPowOut"] += metrics["if"]["absPowOut"]
                            applData["if"]["relPowOut"] += metrics["if"]["relPowOut"]
                        if metrics["lof"]:
                            if "lof" not in applData:
                                # first matching fold: initialise counters
                                applData["lof"] = {}
                                applData["lof"]["absApplOut"] = 0
                                applData["lof"]["relApplOut"] = 0
                                applData["lof"]["absPowOut"] = 0
                                applData["lof"]["relPowOut"] = 0
                            applData["lof"]["absApplOut"] += metrics["lof"]["absApplOut"]
                            applData["lof"]["relApplOut"] += metrics["lof"]["relApplOut"]
                            applData["lof"]["absPowOut"] += metrics["lof"]["absPowOut"]
                            applData["lof"]["relPowOut"] += metrics["lof"]["relPowOut"]
            # turn the summed relative rates into per-fold averages
            if applData["if"]:
                applData["if"]["relApplOut"] /= kFoldSplits
                applData["if"]["relPowOut"] /= kFoldSplits
            if applData["lof"]:
                applData["lof"]["relApplOut"] /= kFoldSplits
                applData["lof"]["relPowOut"] /= kFoldSplits
            applData["calc"] = {}
            applData["calc"] = getOutlierPerAppl(fileContent, appl, filterType)
            outPerAppl.append(copy.deepcopy(applData))
            applData = {}
    return outPerAppl
def getOutlierPerAppl(fileContent, appl, filterType):
    """Merge per-fold outlier details for one application/filter pair.

    Combines the outlier occurrence histograms, the (offset-corrected)
    outlier indices and the distinct power outlier values of both
    detectors, and finally computes the fraction of outliers with power
    value above 25 W relative to the data length.

    NOTE(review): ``applData["dataLen"]`` is reset to 0 on every matching
    line before the comparison, so it ends up holding the LAST fold's end
    index rather than the maximum the comparison suggests was intended —
    confirm whether folds are processed in ascending order.
    """
    applData = {}
    applData["application"] = appl
    applData["settings"] = {}
    for line in fileContent:
        # each result line is [settings, metrics]
        settings = line[0]
        metrics = line[1]
        if metrics:
            if settings["application"] == appl and settings["filterData"] == filterType:
                applData["settings"] = settings
                applData["dataLen"] = 0
                # testDataStr looks like "<start> - <end>"; keep the end index
                if int(settings["testDataStr"].split("- ")[1]) > applData["dataLen"]:
                    applData["dataLen"] = int(settings["testDataStr"].split("- ")[1])
                if "if" in metrics:
                    # fold-local indices are shifted by the fold's start index
                    indexOffset = int(settings["testDataStr"].split(" -")[0])
                    if "if" not in applData:
                        applData["if"] = {}
                        applData["if"]["applOutOccur"] = preprocessOutlierOccurDict(metrics["if"]["applOutOccur"])
                        applData["if"]["outlierIndices"] = []
                        applData["if"]["powerOutVal"] = []
                    else:
                        # merge histograms by summing counts per power value
                        currOccur = preprocessOutlierOccurDict(metrics["if"]["applOutOccur"])
                        applData["if"]["applOutOccur"] = {x: applData["if"]["applOutOccur"].get(x, 0) + currOccur.get(x, 0)
                                                          for x in set(applData["if"]["applOutOccur"]).union(currOccur)}
                    applData["if"]["outlierIndices"] += [x+indexOffset for x in metrics["if"]["applOutlier"]]
                    for val in metrics["if"]["powerOutVal"]:
                        if val not in applData["if"]["powerOutVal"]:
                            applData["if"]["powerOutVal"].append(val)
                if "lof" in metrics:
                    # same merging for the LOF detector
                    indexOffset = int(settings["testDataStr"].split(" -")[0])
                    if "lof" not in applData:
                        applData["lof"] = {}
                        applData["lof"]["applOutOccur"] = preprocessOutlierOccurDict(metrics["lof"]["applOutOccur"])
                        applData["lof"]["outlierIndices"] = []
                        applData["lof"]["powerOutVal"] = []
                    else:
                        currOccur = preprocessOutlierOccurDict(metrics["lof"]["applOutOccur"])
                        applData["lof"]["applOutOccur"] = {x: applData["lof"]["applOutOccur"].get(x, 0) + currOccur.get(x, 0)
                                                           for x in set(applData["lof"]["applOutOccur"]).union(currOccur)}
                    applData["lof"]["outlierIndices"] += [x+indexOffset for x in metrics["lof"]["applOutlier"]]
                    for val in metrics["lof"]["powerOutVal"]:
                        if val not in applData["lof"]["powerOutVal"]:
                            applData["lof"]["powerOutVal"].append(val)
    # share of outlier occurrences above 25 W, relative to the data length
    if "if" in applData:
        total = 0
        for item in applData["if"]["applOutOccur"].items():
            if item[0] > 25:
                total += item[1]
        applData["if"]["powerOutValGreater25"] = total / applData["dataLen"]
    if "lof" in applData:
        total = 0
        for item in applData["lof"]["applOutOccur"].items():
            if item[0] > 25:
                total += item[1]
        applData["lof"]["powerOutValGreater25"] = total / applData["dataLen"]
    return applData
### ------------------------------------------------------------- ###
def createTablesFromMetrics(fileContent):
    """Build a per-application summary table from the raw result lines and
    dump it as a json file (name asked from the user).

    One table column per merged application name; each column holds one
    entry per dataset/filter combination with the rounded IF and LOF
    metrics.

    NOTE(review): assumes ``outAppl`` dicts always contain 'if' and 'lof'
    keys (see getOutPerAppl); otherwise this raises KeyError.
    """
    settings = fileContent[0][0]
    # e.g. "data/ukdale.h5" -> "ukdale"
    h5FileName = (settings["h5File"].split("/")[1]).split(".")[0]
    #csvFileName = "simuResults/tableData_" + (settings["fileName"].split("simRes_")[1]).split(".json")[0] + ".json"
    csvFileName = input("Filename: ")
    applicationsAll, applicationsMerged = getAllApplications(fileContent)
    outlierPerAppl = getOutPerAppl(fileContent, applicationsAll)
    toCSV = []
    col = {}
    entry = {}
    for appl in applicationsMerged:
        col["application"] = appl
        col["data"] = []
        for outAppl in outlierPerAppl:
            # collect every building/filter entry for this application name
            if appl == outAppl["application"][1]:
                entry["dataset"] = h5FileName + " " + str(outAppl["application"][0])
                entry["filter"] = outAppl["filter"]
                if outAppl["if"]:
                    entry["if"] = {}
                    entry["if"]["absApplOut"] = round(outAppl["if"]["absApplOut"], 2)
                    entry["if"]["relApplOut"] = round(outAppl["if"]["relApplOut"], 2)
                    entry["if"]["absPowOut"] = round(outAppl["if"]["absPowOut"], 2)
                    entry["if"]["relPowOut"] = round(outAppl["if"]["relPowOut"], 2)
                    entry["if"]["relPowerOutValGreater25"] = round(outAppl["calc"]["if"]["powerOutValGreater25"], 2)
                if outAppl["lof"]:
                    entry["lof"] = {}
                    entry["lof"]["absApplOut"] = round(outAppl["lof"]["absApplOut"], 2)
                    entry["lof"]["relApplOut"] = round(outAppl["lof"]["relApplOut"], 2)
                    entry["lof"]["absPowOut"] = round(outAppl["lof"]["absPowOut"], 2)
                    entry["lof"]["relPowOut"] = round(outAppl["lof"]["relPowOut"], 2)
                    entry["lof"]["relPowerOutValGreater25"] = round(outAppl["calc"]["lof"]["powerOutValGreater25"], 2)
                col["data"].append(copy.deepcopy(entry))
        toCSV.append(copy.deepcopy(col))
    try:
        # remove a stale output file first; ignore it if absent
        print("Saving...")
        print(csvFileName)
        os.remove(csvFileName)
    except OSError:
        pass
    with open(csvFileName, "w") as outfile:
        json.dump(toCSV, outfile, indent=1)
### ------------------------------------------------------------- ###
def printProgressBar(iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
    """Render an in-place terminal progress bar.

    :param iteration: current iteration count
    :param total: total number of iterations
    :param prefix: text printed before the bar
    :param suffix: text printed after the percentage
    :param decimals: decimals shown in the percentage
    :param length: bar width in characters
    :param fill: character used for the completed part
    :param printEnd: line terminator ('\\r' rewrites the same line)
    """
    percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
    filledLength = int(length * iteration // total)
    bar = fill * filledLength + '-' * (length - filledLength)
    print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)
    # print a final newline when complete so later output starts fresh
    if iteration == total:
        print()
### ------------------------------------------------------------- ###
def preprocessOutlierOccurDict(applOutOccur):
    """Convert a (possibly str-keyed) {power value: count} mapping into an
    int-keyed dict ordered by ascending power value.

    :param applOutOccur: mapping of power value to occurrence count
    :return: int-keyed dict, insertion-ordered by power value
    """
    pairs = [(int(key), int(count)) for key, count in applOutOccur.items()]
    return dict(sorted(pairs, key=lambda pair: pair[0]))
### ------------------------------------------------------------- ###
def createOutlierOccurHistogram(text, outlierOccur):
    """Plot a histogram of outlier power-value occurrences and save it as png.

    BUG FIX: this function previously took no parameters but referenced
    ``text`` and ``outlierOccur``, so every call raised NameError.  The
    signature now takes the same ``text`` dict as createAlgoComparison
    plus the occurrence mapping.

    :param text: dict with 'plotDir', 'algo', 'h5FileName', 'building' and
        'application' used to build the file name and the figure title
    :param outlierOccur: mapping {power value: occurrence count}
    """
    figFileName = text["plotDir"] + "/" + text["algo"] + "_" + text["h5FileName"] + str(text["building"]) + "_hist_" + str(text["application"]) + ".png"
    figureTxt = "UOD using " + text["algo"] + " - " + text["h5FileName"] + " " + str(text["building"]) + " - Histogram - " + str(text["application"])
    plt.figure(figureTxt)
    plt.bar(range(len(outlierOccur)), list(outlierOccur.values()), align='center')
    plt.title(figureTxt)
    plt.xlabel("Power Values in W")
    plt.ylabel("# Counts")
    plt.savefig(figFileName)
    plt.close()
def createAlgoComparison(data, text, outlierIndices):
    """Plot the raw power signal above the flagged outlier samples and save
    the figure as a png.

    :param data: dict holding the signal under 'x'
        (NOTE(review): ``data["x"][outlierIndices]`` fancy-indexes with a
        list, which needs a numpy array / pandas series — confirm callers
        pass one, not a plain list)
    :param text: dict with 'plotDir', 'algo', 'h5FileName', 'building' and
        'application' used for the file name and the figure title
    :param outlierIndices: indices of samples flagged as outliers
    """
    figFileName = text["plotDir"] + "/" + text["algo"] + "_" + text["h5FileName"] + str(text["building"]) + "_diff_" + str(text["application"]) + ".png"
    figureTxt = "UOD using " + text["algo"] + " - " + text["h5FileName"] + " " + str(text["building"]) + " - Comparison - " + str(text["application"])
    plt.figure(figureTxt)
    # top: full signal; bottom: only the outlier samples
    plt.subplot(2,1,1)
    plt.plot(data["x"], label= "data")
    plt.subplot(2,1,2)
    plt.plot(outlierIndices, data["x"][outlierIndices], label="outlier")
    plt.title(figureTxt)
    plt.xlabel("Time in s")
    plt.ylabel("Power Values in W")
    plt.legend(loc="upper left")
    plt.savefig(figFileName)
    plt.close()
def createHistoComparison(outlier, text):
    """Create a 2x2 IF-vs-LOF comparison figure and save it as a PNG.

    Layout: top row = per-algorithm histograms of outlier power-value
    occurrence counts; bottom left = histogram of power values flagged
    by both algorithms (per-value count is the smaller of the two);
    bottom right = table with the absolute and relative overlap of the
    two algorithms' outlier index lists.

    Fixes vs. the original:
    - the second pass over the IF indices when building ``similarIndices``
      was dead code (every shared index is already collected by the first
      pass) and has been removed;
    - the O(n*m) nested loop over both occurrence dicts is replaced by an
      equivalent key-intersection with ``min``.

    Parameters
    ----------
    outlier : dict
        Expects outlier["if"] / outlier["lof"], each holding
        "applOutOccur" (power value -> count) and "outlierIndices".
    text : dict
        Plot metadata: "plotDir", "h5FileName", "building",
        "application", "filter".
    """
    figFileName = text["plotDir"] + "/" + text["h5FileName"] + str(text["building"]) + "_" + str(text["application"]) + "-" + str(text["filter"]) + ".png"
    plt.figure()
    plt.tight_layout()
    # Top left: Isolation Forest histogram.
    plt.subplot(2,2,1)
    plt.title("Histogram - IF outliers")
    plt.bar(range(len(outlier["if"]["applOutOccur"])), list(outlier["if"]["applOutOccur"].values()), align='center', label="IF", color="orange")
    plt.legend(loc="upper left")
    plt.xlabel("Power Values in W")
    plt.ylabel("# Counts")
    # Top right: Local Outlier Factor histogram.
    plt.subplot(2,2,2)
    plt.title("Histogram - LOF outliers")
    plt.bar(range(len(outlier["lof"]["applOutOccur"])), list(outlier["lof"]["applOutOccur"].values()), align='center', label="LOF", color="blue")
    plt.legend(loc="upper left")
    plt.xlabel("Power Values in W")
    plt.ylabel("# Counts")
    # Indices flagged by both algorithms, in LOF-list order (duplicates in
    # the LOF list are kept, matching the original behavior).
    ifIndexSet = set(outlier["if"]["outlierIndices"])
    similarIndices = [idx for idx in outlier["lof"]["outlierIndices"] if idx in ifIndexSet]
    plt.subplot(2,2,3)
    plt.title("Histogram - IF & LOF outliers")
    # Power values present in both occurrence dicts; keep the smaller count.
    similarPowVal = {powVal: min(lofCount, outlier["if"]["applOutOccur"][powVal])
                     for powVal, lofCount in outlier["lof"]["applOutOccur"].items()
                     if powVal in outlier["if"]["applOutOccur"]}
    plt.bar(range(len(similarPowVal)), list(similarPowVal.values()), align='center', label="SIM", color="red")
    plt.legend(loc="upper left")
    plt.xlabel("Power Values in W")
    plt.ylabel("# Counts")
    # Bottom right: overlap statistics as a table instead of axes.
    plt.subplot(2,2,4)
    columns = ('total sim.', "rel. sim. IF", "rel. sim. LOF")
    plt.axis("off")
    cell_text = [[str(len(similarIndices)), str(round(len(similarIndices)/len(outlier["if"]["outlierIndices"]), 4)), str(round(len(similarIndices)/len(outlier["lof"]["outlierIndices"]), 4))]]
    plt.table(cellText=cell_text,
              colLabels=columns,
              cellLoc="center",
              loc="center"
              )
    plt.tight_layout()
    plt.savefig(figFileName)
    plt.close()
def createHistoComparisonAgg(outlier, text):
    """Create a 2x2 IF-vs-LOF comparison figure (aggregate variant) as PNG.

    NOTE(review): this function is byte-for-byte identical to
    createHistoComparison in the original file; the duplication is kept
    here for interface stability, but the two should eventually be merged.

    Layout: top row = per-algorithm histograms of outlier power-value
    occurrence counts; bottom left = histogram of power values flagged by
    both algorithms (per-value count is the smaller of the two); bottom
    right = table with absolute and relative overlap of the index lists.

    Fixes vs. the original: the dead second pass building
    ``similarIndices`` was removed, and the O(n*m) nested loop over both
    occurrence dicts became an equivalent key-intersection with ``min``.
    """
    figFileName = text["plotDir"] + "/" + text["h5FileName"] + str(text["building"]) + "_" + str(text["application"]) + "-" + str(text["filter"]) + ".png"
    plt.figure()
    plt.tight_layout()
    # Top left: Isolation Forest histogram.
    plt.subplot(2,2,1)
    plt.title("Histogram - IF outliers")
    plt.bar(range(len(outlier["if"]["applOutOccur"])), list(outlier["if"]["applOutOccur"].values()), align='center', label="IF", color="orange")
    plt.legend(loc="upper left")
    plt.xlabel("Power Values in W")
    plt.ylabel("# Counts")
    # Top right: Local Outlier Factor histogram.
    plt.subplot(2,2,2)
    plt.title("Histogram - LOF outliers")
    plt.bar(range(len(outlier["lof"]["applOutOccur"])), list(outlier["lof"]["applOutOccur"].values()), align='center', label="LOF", color="blue")
    plt.legend(loc="upper left")
    plt.xlabel("Power Values in W")
    plt.ylabel("# Counts")
    # Indices flagged by both algorithms, in LOF-list order.
    ifIndexSet = set(outlier["if"]["outlierIndices"])
    similarIndices = [idx for idx in outlier["lof"]["outlierIndices"] if idx in ifIndexSet]
    plt.subplot(2,2,3)
    plt.title("Histogram - IF & LOF outliers")
    # Power values present in both occurrence dicts; keep the smaller count.
    similarPowVal = {powVal: min(lofCount, outlier["if"]["applOutOccur"][powVal])
                     for powVal, lofCount in outlier["lof"]["applOutOccur"].items()
                     if powVal in outlier["if"]["applOutOccur"]}
    plt.bar(range(len(similarPowVal)), list(similarPowVal.values()), align='center', label="SIM", color="red")
    plt.legend(loc="upper left")
    plt.xlabel("Power Values in W")
    plt.ylabel("# Counts")
    # Bottom right: overlap statistics as a table instead of axes.
    plt.subplot(2,2,4)
    columns = ('total sim.', "rel. sim. IF", "rel. sim. LOF")
    plt.axis("off")
    cell_text = [[str(len(similarIndices)), str(round(len(similarIndices)/len(outlier["if"]["outlierIndices"]), 4)), str(round(len(similarIndices)/len(outlier["lof"]["outlierIndices"]), 4))]]
    plt.table(cellText=cell_text,
              colLabels=columns,
              cellLoc="center",
              loc="center"
              )
    plt.tight_layout()
    plt.savefig(figFileName)
    plt.close()
### ------------------------------------------------------------- ###
def createPlotsFromMetric(fileContent):
    """Render comparison plots for every application found in the metrics.

    Recreates the plot output directory, then either (for files whose name
    contains "aggregate") draws one 2x3 IF/LOF-per-filter figure per
    application, or (otherwise) one IF-vs-LOF histogram comparison per
    application and filter via createHistoComparison().

    fileContent: parsed JSON metrics; fileContent[0][0] is assumed to be a
    metadata dict holding "fileName" and "h5File" -- TODO confirm shape.
    """
    plotDir = "simuResults/plots" # + (fileContent[0][0]["fileName"].split("simRes_")[1]).split(".json")[0]
    if "aggre" in fileContent[0][0]["fileName"]: plotDir = "simuResults/plotsAgg"
    h5FileName = (fileContent[0][0]["h5File"].split("/")[1]).split(".")[0]
    # Start from an empty plot directory; ignore "directory missing" errors.
    try:
        shutil.rmtree(plotDir)
    except OSError:
        pass
    os.mkdir(plotDir)
    applicationsAll, applicationsMerged = getAllApplications(fileContent)
    if "aggregate" in fileContent[0][0]["fileName"]:
        i = 0
        for appl in applicationsAll:
            printProgressBar(i, (len(applicationsAll))-1, prefix = 'Plotting:', suffix = 'Complete', length = 50)
            figFileName = plotDir + "/" + h5FileName + "_" + str(appl) + ".png"
            plt.figure()
            # One column per filter type: row 1 = IF histograms, row 2 = LOF.
            for idx, filterType in enumerate([None, "rollingMedian", "hampel"]):
                outlier = getOutlierPerAppl(fileContent, appl, filterType)
                if idx == 0:
                    colour = "orange"
                elif idx == 1:
                    colour = "blue"
                else:
                    colour = "red"
                plt.subplot(2,3,idx+1)
                plt.title("IF - " + str(filterType))
                plt.bar(range(len(outlier["if"]["applOutOccur"])), list(outlier["if"]["applOutOccur"].values()), align='center', label="IF", color=colour)
                plt.legend(loc="upper left")
                plt.xlabel("Power Values in W")
                plt.ylabel("# Counts")
                plt.subplot(2,3,3+idx+1)
                plt.title("LOF - " + str(filterType))
                plt.bar(range(len(outlier["lof"]["applOutOccur"])), list(outlier["lof"]["applOutOccur"].values()), align='center', label="LOF", color=colour)
                plt.legend(loc="upper left")
                plt.xlabel("Power Values in W")
                plt.ylabel("# Counts")
            i+=1
            plt.tight_layout()
            plt.savefig(figFileName)
            plt.close()
    else:
        i = 0
        for filterType in [None, "rollingMedian", "hampel"]:
            i=0
            for appl in applicationsAll:
                outlier = getOutlierPerAppl(fileContent, appl, filterType)
                printProgressBar(i, (len(applicationsAll))-1, prefix = 'Plotting:', suffix = 'Complete', length = 50)
                # Assemble the metadata dict createHistoComparison() expects.
                text = {}
                text["h5FileName"] = h5FileName
                text["plotDir"] = plotDir
                text["application"] = outlier["application"][1]
                text["building"] = outlier["application"][0]
                text["filter"] = filterType
                createHistoComparison(outlier, text)
                i+=1
### ------------------------------------------------------------- ###
if __name__ == "__main__":
    # Entry point: select a metrics JSON file (getJsonFile is defined
    # elsewhere in this module), then render tables and plots from it.
    jsonFile = getJsonFile()
    if jsonFile is not None:
        fileContent = loadJsonContent(jsonFile)
        createTablesFromMetrics(fileContent)
        createPlotsFromMetric(fileContent)
| 40.685771
| 191
| 0.52606
|
acff0bb10360be258d6857cb7d43f1072e1cadf8
| 360
|
py
|
Python
|
titanic/scripts/clf_reporter.py
|
wojtekwalczak/kaggle_titanic
|
f13258dcb9e964bcad61609fdcc374e3db47824e
|
[
"Apache-2.0"
] | null | null | null |
titanic/scripts/clf_reporter.py
|
wojtekwalczak/kaggle_titanic
|
f13258dcb9e964bcad61609fdcc374e3db47824e
|
[
"Apache-2.0"
] | null | null | null |
titanic/scripts/clf_reporter.py
|
wojtekwalczak/kaggle_titanic
|
f13258dcb9e964bcad61609fdcc374e3db47824e
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import sys
from sklearn.metrics import classification_report
from titanic.db.train_test_loader import x_test, x_train, y_test, y_train
from titanic.utils.pick_pipeline import pick_pipeline
# Build the classifier pipeline selected by the command-line arguments.
pipeline = pick_pipeline(sys.argv)
# Fit on the training split and report precision/recall/F1 on the test split.
clf = pipeline.fit(x_train, y_train)
print(classification_report(y_test, clf.predict(x_test)))
| 30
| 73
| 0.838889
|
acff0bbd6629661a2c4123869e9e341b4df11838
| 4,113
|
py
|
Python
|
evap/evaluation/admin.py
|
karyon/EvaP
|
9026fe0af4e261be73423131cfc52a7e655c82e6
|
[
"MIT"
] | null | null | null |
evap/evaluation/admin.py
|
karyon/EvaP
|
9026fe0af4e261be73423131cfc52a7e655c82e6
|
[
"MIT"
] | null | null | null |
evap/evaluation/admin.py
|
karyon/EvaP
|
9026fe0af4e261be73423131cfc52a7e655c82e6
|
[
"MIT"
] | null | null | null |
from evap.evaluation.models import Contribution, Course, RatingAnswerCounter, Question, Questionnaire, Semester, TextAnswer, UserProfile
from django.conf import settings
from django.contrib import admin
from django import forms
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.utils.translation import ugettext_lazy as _
class ContributionInline(admin.TabularInline):
    """Inline table so contributions are editable on the Course admin page."""
    model = Contribution
    extra = 3  # number of empty contribution rows shown by default
class CourseAdmin(admin.ModelAdmin):
    """Admin configuration for Course, with contributions edited inline."""
    model = Course
    inlines = [ContributionInline]
    list_display = ('__str__', 'semester', 'type')
    list_filter = ('semester',)
    readonly_fields = ('state',)
    # Outside DEBUG builds, the voters field is additionally read-only.
    if not settings.DEBUG:
        readonly_fields += ('voters',)
class QuestionInline(admin.TabularInline):
    """Inline table so questions are editable on the Questionnaire page."""
    model = Question
    extra = 3  # number of empty question rows shown by default
class QuestionnaireAdmin(admin.ModelAdmin):
    """Admin configuration for Questionnaire, with questions edited inline."""
    model = Questionnaire
    inlines = [QuestionInline]
    list_filter = ('obsolete',)
class UserCreationForm(forms.ModelForm):
    """A form for creating new users. Includes all the required fields, plus a repeated password."""
    password1 = forms.CharField(label=_('Password'), widget=forms.PasswordInput)
    password2 = forms.CharField(label=_('Password confirmation'), widget=forms.PasswordInput)
    class Meta:
        model = UserProfile
        fields = ('username', 'email', 'first_name', 'last_name')
    def clean_password2(self):
        """Validate that both password entries match; returns password2."""
        # Check that the two password entries match
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError(_("Passwords don't match"))
        return password2
    def save(self, commit=True):
        """Create the user, hashing the password via set_password()."""
        # Save the provided password in hashed format
        user = super().save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    """A form for updating users. Includes all the fields on the user, but replaces the password field with admin's password hash display field."""
    password = ReadOnlyPasswordHashField()
    class Meta:
        model = UserProfile
        fields = ('username', 'password', 'email', 'first_name', 'last_name')
    def clean_password(self):
        """Keep the stored password hash untouched on save."""
        # Regardless of what the user provides, return the initial value.
        # This is done here, rather than on the field, because the
        # field does not have access to the initial value
        return self.initial["password"]
class UserProfileAdmin(UserAdmin):
    """Admin for the custom UserProfile model, wired to the custom forms."""
    # The forms to add and change user instances
    form = UserChangeForm
    add_form = UserCreationForm
    # The fields to be used in displaying the User model.
    # These override the definitions on the base UserAdmin
    # that reference specific fields on auth.User.
    list_display = ('username', 'email', 'first_name', 'last_name', 'is_superuser')
    list_filter = ()
    fieldsets = (
        (None, {'fields': ('username', 'email', 'password', 'login_key', 'login_key_valid_until')}),
        ('Personal info', {'fields': ('first_name','last_name', 'title')}),
        ('Delegates and cc-users', {'fields': ('delegates','cc_users')}),
        ('Permissions', {'fields': ('is_superuser', 'groups', 'user_permissions',)}),
    )
    # add_fieldsets is not a standard ModelAdmin attribute. UserAdmin
    # overrides get_fieldsets to use this attribute when creating a user.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('username', 'email', 'first_name', 'last_name', 'password1', 'password2')}
        ),
    )
    search_fields = ('username',)
    ordering = ('username',)
    filter_horizontal = []
# Register models with the default admin site.
admin.site.register(Semester)
admin.site.register(Course, CourseAdmin)
admin.site.register(Questionnaire, QuestionnaireAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
# Raw answer models are only exposed in DEBUG builds.
if settings.DEBUG:
    admin.site.register(TextAnswer)
    admin.site.register(RatingAnswerCounter)
| 35.456897
| 147
| 0.688062
|
acff106dfdef833b2f0ed9ed55dcca336b02e0d4
| 1,638
|
py
|
Python
|
research/cv/mobilenetV3_small_x1_0/export.py
|
mindspore-ai/models
|
9127b128e2961fd698977e918861dadfad00a44c
|
[
"Apache-2.0"
] | 77
|
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/mobilenetV3_small_x1_0/export.py
|
mindspore-ai/models
|
9127b128e2961fd698977e918861dadfad00a44c
|
[
"Apache-2.0"
] | 3
|
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/mobilenetV3_small_x1_0/export.py
|
mindspore-ai/models
|
9127b128e2961fd698977e918861dadfad00a44c
|
[
"Apache-2.0"
] | 24
|
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
mobilenetv3_small export.
"""
import argparse
import numpy as np
from mindspore import context, Tensor, load_checkpoint, load_param_into_net, export
from src.config import config_ascend as config
from src.mobilenetv3 import mobilenet_v3_small
# CLI is parsed at import time so the checkpoint path is available below.
parser = argparse.ArgumentParser(description='Image classification')
parser.add_argument('--checkpoint_path', type=str, required=True, help='Checkpoint file path')
args_opt = parser.parse_args()
if __name__ == '__main__':
    # Export runs in graph mode on an Ascend device.
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    net = mobilenet_v3_small(num_classes=config.num_classes, multiplier=1.)
    # Restore trained weights from the given checkpoint into the network.
    param_dict = load_checkpoint(args_opt.checkpoint_path)
    load_param_into_net(net, param_dict)
    # Dummy NCHW input defining the exported graph's input signature.
    input_shp = [1, 3, config.image_height, config.image_width]
    input_array = Tensor(np.random.uniform(-1.0, 1.0, size=input_shp).astype(np.float32))
    export(net, input_array, file_name=config.export_file, file_format=config.export_format)
| 40.95
| 94
| 0.742369
|
acff107c8226ae60cea5b39c523330260aa9a984
| 2,959
|
py
|
Python
|
rcsb/utils/tests-ccdc/testCcdcSearchExecMp.py
|
rcsb/py-rcsb_utils_ccdc
|
2c635ef9fa03ddcb402fc402d225d48b37b4e301
|
[
"Apache-2.0"
] | null | null | null |
rcsb/utils/tests-ccdc/testCcdcSearchExecMp.py
|
rcsb/py-rcsb_utils_ccdc
|
2c635ef9fa03ddcb402fc402d225d48b37b4e301
|
[
"Apache-2.0"
] | null | null | null |
rcsb/utils/tests-ccdc/testCcdcSearchExecMp.py
|
rcsb/py-rcsb_utils_ccdc
|
2c635ef9fa03ddcb402fc402d225d48b37b4e301
|
[
"Apache-2.0"
] | null | null | null |
##
#
# File: testCcdcSearchMp.py
# Author: J. Westbrook
# Date: 15-Jan-2021
# Version: 0.001
#
# Updated:
#
##
"""
Test cases for chemical component search (mp) against the CCDC local Python API -
"""
__docformat__ = "restructuredtext en"
__author__ = "John Westbrook"
__email__ = "john.westbrook@rcsb.org"
__license__ = "Apache 2.0"
import glob
import logging
import unittest
import time
import os
import os.path
import platform
import resource
from rcsb.utils.ccdc.CcdcSearchExecMp import CcdcSearchExecMp
from rcsb.utils.ccdc import __version__
HERE = os.path.abspath(os.path.dirname(__file__))
TOPDIR = os.path.dirname(os.path.dirname(os.path.dirname(HERE)))
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s]-%(module)s.%(funcName)s: %(message)s")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class CcdcSearchMpTests(unittest.TestCase):
    """Integration tests for CcdcSearchExecMp against a local CCDC install.

    NOTE(review): setUp() reads the CSD_PYTHON_ROOT_PATH and CSDHOME
    environment variables and will raise KeyError if they are unset.
    """
    def setUp(self):
        # Input/output locations relative to this test module.
        self.__workPath = os.path.join(HERE, "test-output")
        self.__dataPath = os.path.join(HERE, "test-data")
        self.__cachePath = os.path.join(HERE, "test-output", "CACHE")
        self.__molFilePath = os.path.join(self.__dataPath, "molfiles")
        self.__pythonRootPath = os.path.join(os.environ["CSD_PYTHON_ROOT_PATH"])
        self.__csdHome = os.environ["CSDHOME"]
        #
        self.__simResultPath = os.path.join(self.__workPath, "test_chem_comp_ccdc_sim")
        self.__ssResultPath = os.path.join(self.__workPath, "test_chem_comp_ccdc_ss_exec")
        #
        # Wall-clock start time, reported again in tearDown().
        self.__startTime = time.time()
        logger.info("Starting %s (%s) at %s", self.id(), __version__, time.strftime("%Y %m %d %H:%M:%S", time.localtime()))
    def tearDown(self):
        # Log peak resident memory and elapsed time for this test.
        unitS = "MB" if platform.system() == "Darwin" else "GB"
        rusageMax = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        logger.info("Maximum resident memory size %.4f %s", rusageMax / 10 ** 6, unitS)
        endTime = time.time()
        logger.info("Completed %s at %s (%.4f seconds)", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()), endTime - self.__startTime)
    def testSubStructureSearchExecMp(self):
        """Test case: CCDC substructure search"""
        try:
            # All .mol2 query files under the test-data molfiles directory.
            pL = glob.glob(os.path.join(self.__molFilePath, "*.mol2"), recursive=True)
            logger.info("search list length %d", len(pL))
            #
            csmp = CcdcSearchExecMp(pythonRootPath=self.__pythonRootPath, csdHome=self.__csdHome)
            csmp.runSearch(pL, self.__ssResultPath, searchType="substructure", numProc=2, chunkSize=2)
        except Exception as e:
            logger.exception("Failing with %s", str(e))
            self.fail()
def suiteSearchTests():
    """Build and return the unittest suite for the CCDC search tests."""
    suite = unittest.TestSuite()
    suite.addTest(CcdcSearchMpTests("testSubStructureSearchExecMp"))
    return suite
if __name__ == "__main__":
    #
    # Run the explicit suite (rather than unittest discovery) verbosely.
    mySuite = suiteSearchTests()
    unittest.TextTestRunner(verbosity=2).run(mySuite)
| 34.406977
| 149
| 0.676918
|
acff11161183de7548681e0d954cb38cf8f14758
| 8,008
|
py
|
Python
|
tests/test_remote_monitor.py
|
triton-inference-server/model_analyzer
|
3151792403b4e3257dd5188fe745bbdf68e521e9
|
[
"Apache-2.0"
] | 115
|
2020-09-03T18:48:16.000Z
|
2022-03-31T13:10:48.000Z
|
tests/test_remote_monitor.py
|
triton-inference-server/model_analyzer
|
3151792403b4e3257dd5188fe745bbdf68e521e9
|
[
"Apache-2.0"
] | 101
|
2020-09-08T19:16:31.000Z
|
2022-03-31T23:32:54.000Z
|
tests/test_remote_monitor.py
|
triton-inference-server/model_analyzer
|
3151792403b4e3257dd5188fe745bbdf68e521e9
|
[
"Apache-2.0"
] | 39
|
2020-09-08T21:57:17.000Z
|
2022-03-31T12:13:10.000Z
|
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model_analyzer.monitor.remote_monitor import RemoteMonitor
from model_analyzer.record.types.gpu_free_memory import GPUFreeMemory
from model_analyzer.record.types.gpu_power_usage import GPUPowerUsage
from model_analyzer.record.types.gpu_used_memory import GPUUsedMemory
from model_analyzer.record.types.gpu_utilization import GPUUtilization
from model_analyzer.model_analyzer_exceptions \
import TritonModelAnalyzerException
from .common import test_result_collector as trc
from tests.mocks.mock_requests import MockRequests
import unittest
import time
# Fixture constants; the last four are interpolated into the mocked
# Triton metrics response (TEST_METRICS_RESPONSE) below and asserted
# against by the tests.
TEST_DEVICE_NAME = 'TEST_DEVICE_NAME'
TEST_DEVICE_ID = 0
TEST_METRICS_URL = 'localhost:8002'
TEST_POWER_USAGE = 20
TEST_GPU_MEMORY_USAGE = 400000000
TEST_GPU_UTILIZATION = 0.5
TEST_TOTAL_BYTES = 1000000000
TEST_METRICS_RESPONSE = bytes(
'# HELP nv_inference_request_success Number of successful'
'inference requests, all batch sizes\n# TYPE nv_inference_request_success counter\n# '
'HELP nv_inference_request_failure Number of failed inference requests, all batch sizes\n# '
'TYPE nv_inference_request_failure counter\n# HELP nv_inference_count Number of inferences '
'performed\n# TYPE nv_inference_count counter\n# HELP nv_inference_exec_count Number of model'
'executions performed\n# TYPE nv_inference_exec_count counter\n# '
'HELP nv_inference_request_duration_us Cummulative inference request duration in microseconds\n# '
'TYPE nv_inference_request_duration_us counter\n# HELP nv_inference_queue_duration_us Cummulative '
'inference queuing duration in microseconds\n# TYPE nv_inference_queue_duration_us counter\n# HELP '
'nv_inference_compute_input_duration_us Cummulative compute input duration in microseconds\n# TYPE '
'nv_inference_compute_input_duration_us counter\n# HELP nv_inference_compute_infer_duration_us Cummulative '
'compute inference duration in microseconds\n# TYPE nv_inference_compute_infer_duration_us counter\n# HELP '
'nv_inference_compute_output_duration_us Cummulative inference compute output duration in microseconds\n# TYPE '
'nv_inference_compute_output_duration_us counter\n# HELP nv_gpu_utilization GPU utilization rate [0.0 - 1.0)\n# '
'TYPE nv_gpu_utilization gauge\nnv_gpu_utilization{gpu_uuid="GPU-e35ba3d2-6eef-2bb9-e35c-6ef6eada4f11"} '
f'{TEST_GPU_UTILIZATION}\n# HELP nv_gpu_memory_total_bytes GPU total memory, in bytes\n# TYPE nv_gpu_memory_total_bytes '
'gauge\nnv_gpu_memory_total_bytes{gpu_uuid="GPU-e35ba3d2-6eef-2bb9-e35c-6ef6eada4f11"} '
f'{TEST_TOTAL_BYTES}\n# HELP nv_gpu_memory_used_bytes GPU used memory, in bytes\n# TYPE '
'nv_gpu_memory_used_bytes gauge\nnv_gpu_memory_used_bytes{gpu_uuid="GPU-e35ba3d2-6eef-2bb9-e35c-6ef6eada4f11"} '
f'{TEST_GPU_MEMORY_USAGE}\n# HELP nv_gpu_power_usage GPU power usage in watts\n# TYPE nv_gpu_power_usage '
'gauge\nnv_gpu_power_usage{gpu_uuid="GPU-e35ba3d2-6eef-2bb9-e35c-6ef6eada4f11"} '
f'{TEST_POWER_USAGE}\n# HELP nv_gpu_power_limit GPU power management limit in watts\n# TYPE '
'nv_gpu_power_limit gauge\nnv_gpu_power_limit{gpu_uuid="GPU-e35ba3d2-6eef-2bb9-e35c-6ef6eada4f11"} 280.000000\n# '
'HELP nv_energy_consumption GPU energy consumption in joules since the Triton Server started\n# TYPE nv_energy_consumption '
'counter\nnv_energy_consumption{gpu_uuid="GPU-e35ba3d2-6eef-2bb9-e35c-6ef6eada4f11"} 1474.042000\n',
encoding='ascii')
class TestRemoteMonitor(trc.TestResultCollector):
    """Tests RemoteMonitor's parsing of a Triton metrics endpoint.

    All HTTP traffic is mocked: every GET returns TEST_METRICS_RESPONSE,
    so record values can be asserted against the fixture constants.
    """
    def setUp(self):
        # Patch `requests` inside remote_monitor so no real server is needed.
        self.mock_requests = MockRequests(
            mock_paths=['model_analyzer.monitor.remote_monitor'])
        self.mock_requests.start()
        self.mock_requests.set_get_request_response(TEST_METRICS_RESPONSE)
    def test_record_memory(self):
        """Used/free GPU memory records must match the mocked metrics (MB)."""
        # One measurement every 0.1 seconds
        frequency = 0.1
        monitoring_time = 1
        metrics = [GPUUsedMemory, GPUFreeMemory]
        gpu_monitor = RemoteMonitor(TEST_METRICS_URL, frequency, metrics)
        gpu_monitor.start_recording_metrics()
        time.sleep(monitoring_time)
        records = gpu_monitor.stop_recording_metrics()
        # Assert instance types
        for record in records:
            self.assertIsInstance(record.device_uuid(), str)
            self.assertIsInstance(record.value(), float)
            if isinstance(record, GPUFreeMemory):
                self.assertEqual(record.value(),
                                 (TEST_TOTAL_BYTES - TEST_GPU_MEMORY_USAGE) //
                                 1e6)
            else:
                self.assertEqual(record.value(), TEST_GPU_MEMORY_USAGE // 1e6)
            self.assertIsInstance(record.timestamp(), int)
        # The number of records should be dividable by number of metrics
        self.assertEqual(len(records) % len(metrics), 0)
        self.assertGreater(len(records), 0)
        # Stopping a monitor twice must raise.
        with self.assertRaises(TritonModelAnalyzerException):
            gpu_monitor.stop_recording_metrics()
        gpu_monitor.destroy()
        # An unknown metric tag must be rejected at construction time.
        metrics = ['UndefinedTag']
        with self.assertRaises(TritonModelAnalyzerException):
            RemoteMonitor(TEST_METRICS_URL, frequency, metrics)
    def test_record_power(self):
        """Power-usage records must equal the mocked wattage."""
        # One measurement every 0.01 seconds
        frequency = 0.1
        monitoring_time = 1
        metrics = [GPUPowerUsage]
        gpu_monitor = RemoteMonitor(TEST_METRICS_URL, frequency, metrics)
        gpu_monitor.start_recording_metrics()
        time.sleep(monitoring_time)
        records = gpu_monitor.stop_recording_metrics()
        # Assert instance types
        for record in records:
            self.assertIsInstance(record.device_uuid(), str)
            self.assertIsInstance(record.value(), float)
            self.assertEqual(record.value(), TEST_POWER_USAGE)
            self.assertIsInstance(record.timestamp(), int)
        # The number of records should be dividable by number of metrics
        self.assertEqual(len(records) % len(metrics), 0)
        self.assertGreater(len(records), 0)
        gpu_monitor.destroy()
    def test_record_utilization(self):
        """Utilization records must equal the mocked fraction scaled to %."""
        # One measurement every 0.01 seconds
        frequency = 0.1
        monitoring_time = 1
        metrics = [GPUUtilization]
        gpu_monitor = RemoteMonitor(TEST_METRICS_URL, frequency, metrics)
        gpu_monitor.start_recording_metrics()
        time.sleep(monitoring_time)
        records = gpu_monitor.stop_recording_metrics()
        # Assert instance types
        for record in records:
            self.assertIsInstance(record.device_uuid(), str)
            self.assertIsInstance(record.value(), float)
            self.assertLessEqual(record.value(), 100)
            self.assertEqual(record.value(), TEST_GPU_UTILIZATION * 100)
            self.assertIsInstance(record.timestamp(), int)
        # The number of records should be dividable by number of metrics
        self.assertEqual(len(records) % len(metrics), 0)
        self.assertGreater(len(records), 0)
        gpu_monitor.destroy()
    def test_immediate_start_stop(self):
        """Start immediately followed by stop must not raise."""
        frequency = 1
        metrics = [GPUUsedMemory, GPUFreeMemory]
        gpu_monitor = RemoteMonitor(TEST_METRICS_URL, frequency, metrics)
        gpu_monitor.start_recording_metrics()
        gpu_monitor.stop_recording_metrics()
        gpu_monitor.destroy()
    def tearDown(self):
        # Undo the requests patch installed in setUp().
        self.mock_requests.stop()
if __name__ == '__main__':
    # Standard unittest discovery entry point for this module.
    unittest.main()
| 46.830409
| 128
| 0.733766
|
acff121abb9f26289a0509d11034c7756453eee5
| 1,332
|
py
|
Python
|
frappe/desk/doctype/route_history/route_history.py
|
ZirrusOne/z1n-frappe
|
c5a266d8dc1dbed077d2356bf189ba1fc3fa7497
|
[
"MIT"
] | null | null | null |
frappe/desk/doctype/route_history/route_history.py
|
ZirrusOne/z1n-frappe
|
c5a266d8dc1dbed077d2356bf189ba1fc3fa7497
|
[
"MIT"
] | null | null | null |
frappe/desk/doctype/route_history/route_history.py
|
ZirrusOne/z1n-frappe
|
c5a266d8dc1dbed077d2356bf189ba1fc3fa7497
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import json
import frappe
from frappe.deferred_insert import deferred_insert as _deferred_insert
from frappe.model.document import Document
class RouteHistory(Document):
    """Standard DocType controller for Route History (no custom logic)."""
    pass
def flush_old_route_records():
    """Deletes all route records except last 500 records per user.

    Fix: the get_all() call previously used a magic ``limit_start=500``
    that silently duplicated ``records_to_keep_limit``; it now references
    the constant so the two can never drift apart.
    """
    records_to_keep_limit = 500
    # Only users currently holding more than the limit need trimming.
    users = frappe.db.sql('''
        SELECT `user`
        FROM `tabRoute History`
        GROUP BY `user`
        HAVING count(`name`) > %(limit)s
    ''', {
        "limit": records_to_keep_limit
    })
    for user in users:
        user = user[0]
        # The newest record that falls just outside the keep-window;
        # everything at or before its `modified` timestamp is deleted.
        last_record_to_keep = frappe.db.get_all('Route History',
            filters={
                'user': user,
            },
            limit=1,
            limit_start=records_to_keep_limit,
            fields=['modified'],
            order_by='modified desc')
        frappe.db.sql('''
            DELETE
            FROM `tabRoute History`
            WHERE `modified` <= %(modified)s and `user`=%(user)s
        ''', {
            "modified": last_record_to_keep[0].modified,
            "user": user
        })
@frappe.whitelist()
def deferred_insert(routes):
    """Queue Route History rows (current user + route + creation) for
    deferred insertion."""
    payload = []
    for entry in frappe.parse_json(routes):
        payload.append({
            "user": frappe.session.user,
            "route": entry.get("route"),
            "creation": entry.get("creation"),
        })
    _deferred_insert("Route History", json.dumps(payload))
| 21.483871
| 70
| 0.692192
|
acff12846ab8243bfe5b6c158adff9e32d960781
| 33,549
|
py
|
Python
|
test/test_ops.py
|
Daybreak2019/pytorch
|
40373e754c6f0c64117f18151cc82ab0eecb5861
|
[
"Intel"
] | 1
|
2021-04-11T08:27:46.000Z
|
2021-04-11T08:27:46.000Z
|
test/test_ops.py
|
subhankar01/pytorch
|
b80c6f863f2327c712c478f67c248b94d66b65ac
|
[
"Intel"
] | 1
|
2022-01-18T12:17:29.000Z
|
2022-01-18T12:17:29.000Z
|
test/test_ops.py
|
subhankar01/pytorch
|
b80c6f863f2327c712c478f67c248b94d66b65ac
|
[
"Intel"
] | 2
|
2021-07-02T10:18:21.000Z
|
2021-08-18T10:10:28.000Z
|
from functools import partial, wraps
import warnings
import torch
from torch.testing import \
(FileCheck, floating_and_complex_types_and)
from torch.testing._internal.common_utils import \
(TestCase, is_iterable_of_tensors, run_tests, IS_SANDCASTLE, clone_input_helper, make_tensor)
from torch.testing._internal.common_methods_invocations import \
(op_db, method_tests)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, onlyCPU, onlyOnCPUAndCUDA, skipCUDAIfRocm, OpDTypes)
from torch.testing._internal.common_jit import JitCommonTestCase, check_against_reference
from torch.autograd.gradcheck import gradcheck, gradgradcheck
from torch.testing._internal.jit_metaprogramming_utils import create_script_fn, create_traced_fn, \
check_alias_annotation
from torch.testing._internal.jit_utils import disable_autodiff_subgraph_inlining
# Get names of all the operators which have entry in `method_tests` (legacy testing infra)
method_tested_operators = set(map(lambda test_details: test_details[0], method_tests()))
# Tests that apply to all operators
class TestOpInfo(TestCase):
    """Sanity checks over every OpInfo entry in op_db: dtype registration
    (supported and unsupported), backward support, and absence of
    duplicate entries in the legacy method_tests infra."""
    exact_dtype = True
    # Verifies that ops have their unsupported dtypes
    # registered correctly by testing that each claimed unsupported dtype
    # throws a runtime error
    @skipCUDAIfRocm
    @onlyOnCPUAndCUDA
    @ops(op_db, dtypes=OpDTypes.unsupported)
    def test_unsupported_dtypes(self, device, dtype, op):
        # sample_inputs can have a function for generating the input that doesn't work for specified dtype
        # https://github.com/pytorch/pytorch/issues/49024
        with self.assertRaises(RuntimeError):
            samples = op.sample_inputs(device, dtype)
            if len(samples) == 0:
                self.skipTest("Skipped! No sample inputs!")
            # NOTE: only tests on first sample
            sample = samples[0]
            op(sample.input, *sample.args, **sample.kwargs)
    # Verifies that ops have their supported dtypes
    # registered correctly by testing that each claimed supported dtype
    # does NOT throw a runtime error
    # In addition verifies that the generated sample_inputs have the requested device and dtype
    @onlyOnCPUAndCUDA
    @ops(op_db, dtypes=OpDTypes.supported)
    def test_supported_dtypes(self, device, dtype, op):
        for sample in op.sample_inputs(device, dtype):
            op(sample.input, *sample.args, **sample.kwargs)
            # NOTE: only check the first tensor in the iterable of tensors
            sample_input = sample.input[0] if is_iterable_of_tensors(sample.input) else sample.input
            self.assertTrue(sample_input.dtype == dtype)
            self.assertTrue(sample_input.device.type == self.device_type)
    # Verifies that backward for each supported floating or complex dtype
    # does NOT throw a runtime error.
    # TODO: support multi-tensor outputs
    @onlyOnCPUAndCUDA
    @ops(op_db, allowed_dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16))
    def test_supported_backward(self, device, dtype, op):
        if not op.supports_autograd:
            self.skipTest("Skipped! Autograd not supported.")
        if not op.supports_complex_autograd and dtype.is_complex:
            self.skipTest("Skipped! Complex autograd not supported.")
        for sample in op.sample_inputs(device, dtype, requires_grad=True):
            result = op(sample.input, *sample.args, **sample.kwargs)
            # Only Tensor outputs get a backward pass exercised here.
            if not isinstance(result, torch.Tensor):
                continue
            result.sum().backward()
    # Verifies that ops do not have an entry in
    # `method_tests` (legacy testing infra).
    @onlyCPU
    @ops(op_db, allowed_dtypes=[torch.float32])
    def test_duplicate_method_tests(self, device, dtype, op):
        self.assertFalse(op.name in method_tested_operators)
# gradcheck requires double precision
# Decorator preset: restrict @ops to supported double/cdouble dtypes only.
_gradcheck_ops = partial(ops, dtypes=OpDTypes.supported,
                         allowed_dtypes=[torch.double, torch.cdouble])
class TestGradients(TestCase):
    """Runs gradcheck/gradgradcheck over every OpInfo in op_db (double precision only)."""
    exact_dtype = True
    # Copies inputs to inplace operations to avoid inplace modifications
    # to leaves requiring gradient
    def _get_safe_inplace(self, inplace_variant):
        """Wraps an inplace variant so it operates on a clone of its first argument."""
        @wraps(inplace_variant)
        def _fn(t, *args, **kwargs):
            return inplace_variant(t.clone(), *args, **kwargs)
        return _fn
    def _check_helper(self, device, dtype, op, variant, check):
        """Runs `check` ('gradcheck' or 'gradgradcheck') for `variant` on all samples of `op`."""
        if variant is None:
            self.skipTest("Skipped! Variant not implemented.")
        if not op.supports_dtype(dtype, torch.device(device).type):
            self.skipTest(f"Skipped! {op.name} does not support dtype {str(dtype)}")
        # A safe-inplace wrapper (from _get_safe_inplace) carries __wrapped__,
        # so unwrap before comparing against the raw inplace variant.
        def is_inplace(variant):
            if hasattr(variant, "__wrapped__"):
                return variant.__wrapped__ is op.get_inplace()
            return variant is op.get_inplace()
        samples = op.sample_inputs(device, dtype, requires_grad=True,
                                   for_inplace_variant=is_inplace(variant))
        for sample in samples:
            # Note on TensorList inputs
            #
            # gradcheck does not support TensorList inputs so here we pass TensorList
            # inputs of size n as n single Tensor inputs to gradcheck and wrap the op
            # in a function that puts the n Tensor inputs back into a TensorList
            def fn(*inputs):
                # Put tensors back into TensorList since we splat them when passing to gradcheck
                if is_iterable_of_tensors(sample.input):
                    n = len(sample.input)
                    inputs = (inputs[:n], *inputs[n:])
                output = op.gradcheck_wrapper(variant, *inputs, **sample.kwargs)
                if sample.output_process_fn_grad is not None:
                    return sample.output_process_fn_grad(output)
                return output
            # Splat TensorList inputs into single Tensor inputs
            gradcheck_args = (sample.input,) if isinstance(sample.input, torch.Tensor) else tuple(sample.input)
            gradcheck_args += sample.args
            if check == 'gradcheck':
                self.assertTrue(gradcheck(fn, gradcheck_args,
                                          check_batched_grad=op.check_batched_grad,
                                          check_grad_dtypes=True))
            elif check == 'gradgradcheck':
                # gradgradcheck is run twice: once with contiguous and once with
                # non-contiguous grad outputs.
                self.assertTrue(gradgradcheck(fn, gradcheck_args,
                                              gen_non_contig_grad_outputs=False,
                                              check_batched_grad=op.check_batched_gradgrad,
                                              check_grad_dtypes=True))
                self.assertTrue(gradgradcheck(fn, gradcheck_args,
                                              gen_non_contig_grad_outputs=True,
                                              check_batched_grad=op.check_batched_gradgrad,
                                              check_grad_dtypes=True))
            else:
                self.assertTrue(False, msg="Unknown check requested!")
    def _grad_test_helper(self, device, dtype, op, variant):
        return self._check_helper(device, dtype, op, variant, 'gradcheck')
    def _gradgrad_test_helper(self, device, dtype, op, variant):
        return self._check_helper(device, dtype, op, variant, 'gradgradcheck')
    def _skip_helper(self, op, dtype):
        """Skips the current test when the op does not support (complex) autograd."""
        if not op.supports_autograd:
            self.skipTest("Skipped! autograd not supported.")
        if not op.supports_complex_autograd and dtype.is_complex:
            self.skipTest("Skipped! Complex autograd not supported.")
    # Tests that gradients are computed correctly
    @_gradcheck_ops(op_db)
    def test_fn_grad(self, device, dtype, op):
        self._skip_helper(op, dtype)
        self._grad_test_helper(device, dtype, op, op.get_op())
    # Method grad (and gradgrad, see below) tests are disabled since they're
    # costly and redundant with function grad (and gradgad) tests
    # @_gradcheck_ops(op_db)
    # def test_method_grad(self, device, dtype, op):
    #     self._skip_helper(op, dtype)
    #     self._grad_test_helper(device, dtype, op, op.get_method())
    @_gradcheck_ops(op_db)
    def test_inplace_grad(self, device, dtype, op):
        self._skip_helper(op, dtype)
        if not op.inplace_variant or not op.supports_inplace_autograd:
            self.skipTest("Skipped! Operation does not support inplace autograd.")
        self._grad_test_helper(device, dtype, op, self._get_safe_inplace(op.get_inplace()))
    # Test that gradients of gradients are computed correctly
    @_gradcheck_ops(op_db)
    def test_fn_gradgrad(self, device, dtype, op):
        self._skip_helper(op, dtype)
        self._gradgrad_test_helper(device, dtype, op, op.get_op())
    # Method gradgrad (and grad, see above) tests are disabled since they're
    # costly and redundant with function gradgrad (and grad) tests
    # @_gradcheck_ops(op_db)
    # def test_method_gradgrad(self, device, dtype, op):
    #     self._skip_helper(op, dtype)
    #     self._gradgrad_test_helper(device, dtype, op, op.get_method())
    @_gradcheck_ops(op_db)
    def test_inplace_gradgrad(self, device, dtype, op):
        self._skip_helper(op, dtype)
        if not op.inplace_variant or not op.supports_inplace_autograd:
            self.skipTest("Skipped! Operation does not support inplace autograd.")
        self._gradgrad_test_helper(device, dtype, op, self._get_safe_inplace(op.get_inplace()))
# Tests operators for consistency between JIT and eager, also checks
# correctness of JIT specific alias schemas and intended
# autodifferentiation behavior.
# Inherits from JitCommonTestCase instead of TestCase directly to share
# functionality with original test_jit.py method operator tests
class TestCommon(JitCommonTestCase):
    """Cross-variant (function/method/inplace/alias) and cross-runtime (eager/JIT) consistency tests."""
    exact_dtype = True
    # variant testing is only done with torch.float and torch.cfloat to avoid
    # excessive test times and maximize signal to noise ratio
    _variant_ops = partial(ops, dtypes=OpDTypes.supported,
                           allowed_dtypes=(torch.float, torch.cfloat))
    # alias testing is only done with torch.float for the same reason
    _alias_ops = partial(ops, dtypes=OpDTypes.supported,
                         allowed_dtypes=(torch.float,))
    # Tests that the forward and backward passes of operations produce the
    # same values for the cross-product of op variants (method, inplace)
    # against eager's gold standard op function variant
    @_variant_ops(op_db)
    def test_variant_consistency_eager(self, device, dtype, op):
        # Acquires variants (method variant, inplace variant, aliases)
        method = op.method_variant
        inplace = op.inplace_variant
        # list of all inplace ops: inplace variant + alias inplace variants if exist
        inplace_ops = [inplace, ]
        variants = [method, inplace]
        for a_op in op.aliases:
            variants.append(a_op.op)
            variants.append(a_op.method_variant)
            variants.append(a_op.inplace_variant)
            inplace_ops.append(a_op.inplace_variant)
        # filter(None, ...) drops variants that don't exist (None entries)
        inplace_variants = tuple(filter(None, inplace_ops))
        variants = tuple(filter(None, variants))
        outplace_variants = tuple(set(variants) - set(inplace_variants))
        _requires_grad = (op.supports_autograd and
                          (dtype.is_floating_point or op.supports_complex_autograd))
        samples = op.sample_inputs(device, dtype, requires_grad=_requires_grad,
                                   for_inplace_variant=False)
        def _test_consistency_helper(samples, variants):
            """Compares each variant's forward (and backward, when available) against `op`."""
            for sample in samples:
                # TODO: Check grad for all Tensors requiring grad if sample.input is TensorList
                tensor = sample.input if isinstance(sample.input, torch.Tensor) else sample.input[0]
                # Computes function forward and backward values
                tensor.grad = None
                expected_forward = op(sample.input, *sample.args, **sample.kwargs)
                expected_grad = None
                # Skips inplace variants if the output dtype is not the same as
                # the input dtype
                skip_inplace = False
                if (isinstance(expected_forward, torch.Tensor) and
                        expected_forward.dtype is not tensor.dtype):
                    skip_inplace = True
                # TODO: backward consistency only supported for single tensor outputs
                # TODO: backward consistency only checked on sample.input, not all
                # tensor inputs
                # TODO: update to handle checking grads of all tensor inputs as
                # derived from each tensor output
                if (op.supports_autograd and isinstance(expected_forward, torch.Tensor)
                        and (dtype.is_floating_point or op.supports_complex_autograd)):
                    expected_forward.sum().backward()
                    expected_grad = tensor.grad
                # Test eager consistency
                for variant in variants:
                    # Skips inplace ops
                    if variant in inplace_ops and skip_inplace:
                        continue
                    # Compares variant's forward
                    # Note: copies the to-be-modified input when testing the inplace variant
                    tensor.grad = None
                    cloned = clone_input_helper(sample.input) if variant in inplace_ops else sample.input
                    variant_forward = variant(cloned,
                                              *sample.args,
                                              **sample.kwargs)
                    self.assertEqual(expected_forward, variant_forward)
                    # Compares variant's backward
                    if expected_grad is not None and \
                            (variant not in inplace_ops or op.supports_inplace_autograd):
                        variant_forward.sum().backward()
                        self.assertEqual(expected_grad, tensor.grad)
        _test_consistency_helper(samples, outplace_variants)
        def _test_inplace_preserve_storage(samples, variants):
            """Checks inplace variants return the same storage they were given."""
            for sample in samples:
                # Skips inplace variants if the output dtype is not the same as
                # the input dtype
                expected_forward = op(sample.input, *sample.args, **sample.kwargs)
                tensor = sample.input if isinstance(sample.input, torch.Tensor) else sample.input[0]
                skip_inplace = False
                if (isinstance(expected_forward, torch.Tensor) and
                        expected_forward.dtype is not tensor.dtype):
                    skip_inplace = True
                if skip_inplace:
                    return
                for variant in variants:
                    cloned = clone_input_helper(sample.input) if variant in inplace_ops else sample.input
                    inp_tensor = cloned if isinstance(cloned, torch.Tensor) else cloned[0]
                    data_ptr = inp_tensor.data_ptr()
                    variant_forward = variant(cloned,
                                              *sample.args,
                                              **sample.kwargs)
                    # TODO Support non-tensor outputs if they exist for inplace ops
                    if (isinstance(variant_forward, torch.Tensor)):
                        self.assertEqual(data_ptr, variant_forward.data_ptr(), atol=0, rtol=0)
                    else:
                        self.assertTrue(False, "Non-tensor outputs for inplace ops are not supported")
        if len(inplace_ops) > 0:
            inplace_samples = op.sample_inputs(device, dtype, requires_grad=_requires_grad,
                                               for_inplace_variant=True)
            _test_consistency_helper(inplace_samples, inplace_variants)
            _test_inplace_preserve_storage(inplace_samples, inplace_variants)
    # Tests that the forward and backward passes of operations produce the
    # same values for the cross-product of op variants (function, method, inplace)
    # and runtimes (eager, traced, scripted).
    # TODO WARNING: inplace x {traced, scripted} not currently tested
    @_variant_ops(op_db)
    def test_variant_consistency_jit(self, device, dtype, op):
        _requires_grad = op.supports_autograd and (dtype.is_floating_point or op.supports_complex_autograd)
        samples = op.sample_inputs(device, dtype, requires_grad=_requires_grad)
        for sample in samples:
            # Acquires variants to test
            func = op.get_op()
            method = op.get_method()
            variants = {
                # TODO: inplace tests currently fail, fix and add inplace variant
                'function': func, 'method': method,
            }
            # Test traced and scripted consistency
            for func_type, variant in variants.items():
                if variant is None:
                    continue
                # Create accessor for script function variant
                name = op.name + '_' if func_type == 'inplace' else op.name
                # run with disable_autodiff_subgraph_inlining(True) to test
                # autodiff support. Context manager forces the graph to contain
                # DifferentiableGraph nodes if they are present
                with disable_autodiff_subgraph_inlining():
                    # Check scripted forward, grad, and grad grad
                    script_fn = create_script_fn(self, name, func_type)
                    def out_fn(output):
                        # Processes the output for autograd
                        if sample.output_process_fn_grad is not None:
                            return sample.output_process_fn_grad(output)
                        return output
                    check_against_reference(self,
                                            script_fn,
                                            func,
                                            out_fn,
                                            (sample.input,) + sample.args,
                                            sample.kwargs,
                                            no_grad=not _requires_grad)
                    # Check traced forward, grad, and grad grad
                    traced_fn = create_traced_fn(self, variant)
                    check_against_reference(self,
                                            traced_fn,
                                            func,
                                            out_fn,
                                            (sample.input,) + sample.args,
                                            sample.kwargs,
                                            no_grad=not _requires_grad)
                    # Check alias annotation schema for correctness (make
                    # sure inputs that aren't supposed to be modified aren't)
                    # Note: only runs in float32 and int64 because schema isn't affected by dtype,
                    # so running it on all dtypes is would be excessive
                    # NOTE(review): the check below tests torch.int32, not int64 as the
                    # comment above says; also _variant_ops only supplies float/cfloat,
                    # so only the float32 branch can ever trigger -- confirm intent.
                    if dtype in [torch.float32, torch.int32]:
                        check_alias_annotation(name, (sample.input,) + sample.args, sample.kwargs,
                                               func_type=func_type, aten_name=op.aten_name)
                    # Check autodifferentiation of nodes for traced and scripted graphs, only need to check once per sample
                    if dtype is torch.float32:
                        # Sandcastle doesn't fuse nodes
                        if IS_SANDCASTLE:
                            # fusible nodes are expected to be found in FusionGroups in the DifferentiableGraphs
                            nonfusible_nodes = op.autodiff_nonfusible_nodes + op.autodiff_fusible_nodes
                            fusible_nodes = []
                        else:
                            nonfusible_nodes = op.autodiff_nonfusible_nodes
                            fusible_nodes = op.autodiff_fusible_nodes
                        self.assertAutodiffNode(traced_fn.last_graph, op.assert_autodiffed, nonfusible_nodes, fusible_nodes)
                        self.assertAutodiffNode(script_fn.last_graph, op.assert_autodiffed, nonfusible_nodes, fusible_nodes)
    @_alias_ops((op for op in op_db if op.aliases))
    def test_jit_alias_remapping(self, device, dtype, op):
        """Checks that op aliases are remapped to the canonical aten name in JIT graphs."""
        samples = op.sample_inputs(device, dtype, requires_grad=True)
        if len(samples) == 0:
            self.skipTest("Skipped! No sample inputs!")
        # NOTE: only tests on first sample
        sample = samples[0]
        # [Scripting Data Preparation]
        # Prepare data for test scripting
        # Below we prepare strings of args/kwargs with and without type annotations.
        # These strings are inserted into function template strings which is then torch scripted.
        # - args string is ["t0"] corresponding to the "input" tensor required by the op
        # - args_annot_kw is the string for the template function signature, for example,
        # ["t0", "s0: float", "s1: bool", "max: float = 1.0", "min: float = 0.0"] ->
        # def fn(t0, s0: float, s1: bool, max: float = 1.0, min: float = 0.0)
        # - args_kw is the string of args/kwargs used to call the op, same as args_annot_kw but
        # without type annotations
        args = ["t0"]
        args_annot_kw = args + \
            [f"s{i}: {type(v).__name__}" for i, v in enumerate(sample.args)] + \
            [f"{k}: {type(v).__name__} = {v}" for k, v in sample.kwargs.items()]
        args_kw = args + \
            [f"s{i}" for i in range(len(sample.args))] + \
            [f"{k}={v}" for k, v in sample.kwargs.items()]
        # Prepare data for test tracing
        sample_args_kwargs = ()
        if len(sample.args) > 0:
            sample_args_kwargs += (sample.args, )
        if len(sample.kwargs) > 0:
            sample_args_kwargs += (sample.kwargs, )
        original_name = op.aten_name
        original_name_inplace = original_name + "_"
        expected_dtype = op(sample.input, *sample.args, **sample.kwargs).dtype
        for a_op in op.aliases:
            inplace = a_op.inplace_variant
            method_or_inplace = [a_op.inplace_variant, a_op.method_variant]
            # NOTE(review): `variants` is a generator, so the scripting loop below
            # exhausts it and the "Test tracing" loop never iterates -- confirm
            # whether this should be a tuple/list instead.
            variants = (v for v in (a_op.op, a_op.method_variant, a_op.inplace_variant) if v is not None)
            # Test scripting:
            for variant in variants:
                variant_name = variant.__name__
                op_name = original_name_inplace if variant is inplace else original_name
                if variant in method_or_inplace:
                    fn_template = '''
                        def _fn(t0{c}{args_annot_kw}):
                            return t0.{alias_name}({args_kw})
                        '''
                    # remove the first input tensor
                    script = fn_template.format(
                        c=", " if len(args_kw[1:]) > 1 or len(args_annot_kw[1:]) >= 1 else "",
                        args_annot_kw=", ".join(args_annot_kw[1:]),
                        args_kw=", ".join(args_kw[1:]),
                        alias_name=variant_name,
                    )
                else:
                    fn_template = '''
                        def _fn({args_annot_kw}):
                            return variant({args_kw})
                        '''
                    script = fn_template.format(
                        args_annot_kw=", ".join(args_annot_kw),
                        args_kw=", ".join(args_kw),
                    )
                scripted = torch.jit.CompilationUnit(script)._fn
                if (variant is inplace and not torch.can_cast(expected_dtype, dtype)):
                    try:
                        inp = clone_input_helper(sample.input)
                        scripted(inp, *sample.args, **sample.kwargs)
                    except Exception as e:
                        continue
                    self.fail("Inplace operation on integer tensor that should be promoted to float didn't fail!")
                inp = clone_input_helper(sample.input)
                scripted(inp, *sample.args, **sample.kwargs)
                inp = clone_input_helper(sample.input)
                graph = scripted.graph_for(inp, *sample.args, **sample.kwargs)
                # The graph should reference the canonical aten name, not the alias.
                FileCheck().check(op.aten_name).check_not(variant_name).run(graph)
            # Test tracing:
            for variant in variants:
                variant_name = variant.__name__
                op_name = original_name_inplace if variant is inplace else original_name
                def _fn(*sample_args, **sample_kwargs):
                    return variant(*sample_args, **sample_kwargs)
                inp = (clone_input_helper(sample.input),) + sample_args_kwargs
                traced = torch.jit.trace(_fn, *inp)
                inp = (clone_input_helper(sample.input),) + sample_args_kwargs
                traced(*inp)
                inp = (clone_input_helper(sample.input),) + sample_args_kwargs
                graph = traced.graph_for(*inp)
                FileCheck().check(op_name).check_not(variant_name).run(graph)
    # Validates ops implement the correct out= behavior
    # See https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch
    # for a description of the correct behavior
    # TODO: operations that support out= but don't support float
    # are not covered by this test.
    @ops(op_db, allowed_dtypes=(torch.float,))
    def test_out(self, device, dtype, op):
        # TODO: verify the op doesn't support the out= kwarg
        if not op.supports_out:
            self.skipTest("Skipped! Op doesn't support out= kwarg.")
        # NOTE: only tests on first sample
        samples = op.sample_inputs(device, dtype)
        sample = samples[0]
        # calls it normally to get the expected result
        expected = op(sample.input, *sample.args, **sample.kwargs)
        op_out = partial(op, sample.input, *sample.args, **sample.kwargs)
        # Short-circuits if output is not a single tensor or an
        # iterable of tensors
        if not isinstance(expected, torch.Tensor) and not is_iterable_of_tensors(expected, include_empty=True):
            self.skipTest("Skipped! Only supports single tensor or iterable of tensor outputs.")
        # A wrapper around map that works with single tensors and always
        # instantiates the map. Used below to apply transforms to
        # single tensor and iterable tensor outputs.
        def _apply_out_transform(fn, out):
            if isinstance(out, torch.Tensor):
                return fn(out)
            # assumes (see above) that out is an iterable of tensors
            return tuple(map(fn, out))
        # Case 0: out= with the correct shape, dtype, and device
        # but NaN values for floating point and complex tensors, and
        # maximum values for integer tensors.
        # Expected behavior: out= values have no effect on the computation.
        def _case_zero_transform(t):
            try:
                info = torch.iinfo(t.dtype)
                return torch.full_like(t, info.max)
            except TypeError as te:
                # for non-integer types fills with NaN
                return torch.full_like(t, float('nan'))
        out = _apply_out_transform(_case_zero_transform, expected)
        result = op_out(out=out)
        self.assertEqual(expected, out)
        # Checks that the returned value shares storage with out
        # NOTE: only checks on the CPU and CUDA device types since some
        # device types don't have storage
        if self.device_type == 'cpu' or self.device_type == 'cuda':
            if isinstance(out, torch.Tensor):
                self.assertEqual(out.storage().data_ptr(), result.storage().data_ptr())
            else:
                for out_t, result_t in zip(out, result):
                    self.assertEqual(out_t.storage().data_ptr(), result_t.storage().data_ptr())
        # Case 1: out= with the correct shape, dtype, and device,
        # but noncontiguous.
        # Expected behavior: strides are respected and `out` storage is not changed.
        def _case_one_transform(t):
            return make_tensor(t.shape,
                               dtype=t.dtype,
                               device=t.device,
                               discontiguous=True)
        # Extracts strides from a tensor or iterable of tensors into a tuple
        def _extract_strides(out):
            if isinstance(out, torch.Tensor):
                return (out.stride(),)
            # assumes (see above) that out is an iterable of tensors
            return tuple(map(lambda t: t.stride(), out))
        def _extract_data_ptrs(out):
            if isinstance(out, torch.Tensor):
                return (out.data_ptr(),)
            # assumes (see above) that out is an iterable of tensors
            return tuple(map(lambda t: t.data_ptr(), out))
        out = _apply_out_transform(_case_one_transform, expected)
        original_strides = _extract_strides(out)
        original_ptrs = _extract_data_ptrs(out)
        op_out(out=out)
        final_strides = _extract_strides(out)
        final_ptrs = _extract_data_ptrs(out)
        self.assertEqual(expected, out)
        self.assertEqual(original_strides, final_strides)
        self.assertEqual(original_ptrs, final_ptrs)
        # Case 2: out= with the correct dtype and device, but the wrong shape
        # Expected behavior: resize with a warning.
        def _case_two_transform(t):
            wrong_shape = list(t.shape)
            if len(wrong_shape) == 0:
                # Handles scalar tensor case (empty list)
                wrong_shape = [2]
            else:
                wrong_shape[-1] = wrong_shape[-1] + 1
            return make_tensor(wrong_shape, dtype=t.dtype, device=t.device)
        out = _apply_out_transform(_case_two_transform, expected)
        msg_fail = "Resized a non-empty tensor but did not warn about it."
        with self.assertWarnsRegex(UserWarning, "An output with one or more elements", msg=msg_fail):
            op_out(out=out)
        self.assertEqual(expected, out)
        # Case 3: out= with the correct dtype and device, but an empty
        # tensor.
        # Expected behavior: resize without warning.
        def _case_three_transform(t):
            return make_tensor((0,),
                               dtype=t.dtype,
                               device=t.device)
        out = _apply_out_transform(_case_three_transform, expected)
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            op_out(out=out)
        # Verifies no warning is a resize warning
        for w in caught:
            if "An output with one or more elements" in str(w.message):
                self.fail("Resizing an out= argument with no elements threw a resize warning!")
        self.assertEqual(expected, out)
        # Case 4: out= with correct shape and dtype, but wrong device.
        wrong_device = None
        if torch.device(device).type != 'cpu':
            wrong_device = 'cpu'
        elif torch.cuda.is_available():
            wrong_device = 'cuda'
        if wrong_device is not None:
            def _case_four_transform(t):
                return make_tensor(t.shape, dtype=t.dtype, device=wrong_device)
            out = _apply_out_transform(_case_four_transform, expected)
            msg_fail = f"Expected RuntimeError when calling with input.device={device} and out.device={wrong_device}"
            with self.assertRaises(RuntimeError, msg=msg_fail):
                op_out(out=out)
        # Case 5: out= with correct shape and device, but a dtype
        # that output cannot be "safely" cast to (long).
        # Expected behavior: error.
        # NOTE: this case is filtered by dtype since some ops produce
        # bool tensors, for example, which can be safely cast to any
        # dtype. It is applied when single tensors are floating point or complex
        # dtypes, or if an op returns multiple tensors when at least one such
        # tensor is a floating point or complex dtype.
        _dtypes = floating_and_complex_types_and(torch.float16, torch.bfloat16)
        if (isinstance(expected, torch.Tensor) and expected.dtype in _dtypes or
                (not isinstance(expected, torch.Tensor) and any(t.dtype in _dtypes for t in expected))):
            def _case_five_transform(t):
                return make_tensor(t.shape, dtype=torch.long, device=t.device)
            out = _apply_out_transform(_case_five_transform, expected)
            msg_fail = "" if not isinstance(expected, torch.Tensor) else \
                       ("Expected RuntimeError when doing an unsafe cast from a result of dtype "
                        f"{expected.dtype} into an out= with dtype torch.long")
            with self.assertRaises(RuntimeError, msg=msg_fail):
                op_out(out=out)
# Instantiates per-device-type copies of the test classes above into this
# module's namespace so the test runner discovers them.
instantiate_device_type_tests(TestOpInfo, globals())
instantiate_device_type_tests(TestGradients, globals())
instantiate_device_type_tests(TestCommon, globals())
if __name__ == '__main__':
    run_tests()
| 48.341499
| 124
| 0.605532
|
acff13edc3434c095d3f4bfe7e23fd1bc45b2f97
| 8,363
|
py
|
Python
|
src/the_tale/the_tale/game/bills/tests/test_building_create.py
|
Alacrate/the-tale
|
43b211f3a99e93964e95abc20a8ed649a205ffcf
|
[
"BSD-3-Clause"
] | 85
|
2017-11-21T12:22:02.000Z
|
2022-03-27T23:07:17.000Z
|
src/the_tale/the_tale/game/bills/tests/test_building_create.py
|
Alacrate/the-tale
|
43b211f3a99e93964e95abc20a8ed649a205ffcf
|
[
"BSD-3-Clause"
] | 545
|
2017-11-04T14:15:04.000Z
|
2022-03-27T14:19:27.000Z
|
src/the_tale/the_tale/game/bills/tests/test_building_create.py
|
Alacrate/the-tale
|
43b211f3a99e93964e95abc20a8ed649a205ffcf
|
[
"BSD-3-Clause"
] | 45
|
2017-11-11T12:36:30.000Z
|
2022-02-25T06:10:44.000Z
|
import smart_imports
smart_imports.all()
class BuildingCreateTests(helpers.BaseTestPrototypes):
    """Tests for the "create building" bill: creation, update, form choices and application."""
    def setUp(self):
        super(BuildingCreateTests, self).setUp()
        # person_1: the highest-power person of place1; person_2: the
        # lowest-power person of place2 (sorted by descending power fraction).
        self.person_1 = sorted(self.place1.persons, key=lambda person: -politic_power_storage.persons.total_power_fraction(person.id))[0]
        self.person_2 = sorted(self.place2.persons, key=lambda person: -politic_power_storage.persons.total_power_fraction(person.id))[-1]
        self.accepted_position_1 = random.choice(list(places_logic.get_available_positions(center_x=self.person_1.place.x, center_y=self.person_1.place.y)))
        self.accepted_position_2 = random.choice(list(places_logic.get_available_positions(center_x=self.person_2.place.x, center_y=self.person_2.place.y)))
        self.bill_data = bills.building_create.BuildingCreate(person_id=self.person_1.id,
                                                              old_place_name_forms=self.place1.utg_name,
                                                              utg_name=game_names.generator().get_test_name('building-name'),
                                                              x=self.accepted_position_1[0],
                                                              y=self.accepted_position_1[1])
        self.bill = prototypes.BillPrototype.create(self.account1, 'bill-1-caption', self.bill_data, chronicle_on_accepted='chronicle-on-accepted')
    def test_create(self):
        self.assertEqual(self.bill.data.person_id, self.person_1.id)
        self.assertEqual(self.bill.data.x, self.accepted_position_1[0])
        self.assertEqual(self.bill.data.y, self.accepted_position_1[1])
    def test_actors(self):
        # Compares by object identity: actors must be the exact place/person objects.
        self.assertEqual([id(a) for a in self.bill_data.actors], [id(self.person_1.place), id(self.person_1)])
    def test_update(self):
        data = linguistics_helpers.get_word_post_data(game_names.generator().get_test_name('new-building-name'), prefix='name')
        data.update({'caption': 'new-caption',
                     'chronicle_on_accepted': 'chronicle-on-accepted',
                     'person': self.person_2.id,
                     'x': self.accepted_position_2[0],
                     'y': self.accepted_position_2[1]})
        form = self.bill.data.get_user_form_update(post=data)
        self.assertTrue(form.is_valid())
        self.bill.update(form)
        # Reload from storage to verify the update was persisted.
        self.bill = prototypes.BillPrototype.get_by_id(self.bill.id)
        self.assertEqual(self.bill.data.person_id, self.person_2.id)
        self.assertEqual(self.bill.data.x, self.accepted_position_2[0])
        self.assertEqual(self.bill.data.y, self.accepted_position_2[1])
        self.assertEqual(self.bill.data.base_name, 'new-building-name-нс,ед,им')
    def check_persons_from_place_in_choices(self, place, persons_ids):
        """Asserts only persons without a building appear in the form's person choices."""
        for person in place.persons:
            if not person.has_building:
                self.assertTrue(person.id in persons_ids)
            else:
                self.assertFalse(person.id in persons_ids)
    def test_user_form_choices(self):
        # Give one person of place2 a building so they must be excluded from choices.
        places_logic.create_building(self.place2.persons[0], utg_name=game_names.generator().get_test_name('r-building-name'))
        form = self.bill.data.get_user_form_update(initial={'person': self.bill.data.person_id})
        persons_ids = []
        for city_name, person_choices in form.fields['person'].choices:
            persons_ids.extend(choice_id for choice_id, choice_name in person_choices)
        self.assertTrue(self.bill.data.person_id in persons_ids)
        self.check_persons_from_place_in_choices(self.place1, persons_ids)
        self.check_persons_from_place_in_choices(self.place2, persons_ids)
        self.check_persons_from_place_in_choices(self.place3, persons_ids)
    @mock.patch('the_tale.game.bills.conf.settings.MIN_VOTES_PERCENT', 0.6)
    @mock.patch('the_tale.game.bills.prototypes.BillPrototype.time_before_voting_end', datetime.timedelta(seconds=0))
    def test_apply(self):
        """Accepting the bill must create exactly one building with the bill's attributes."""
        self.assertEqual(places_models.Building.objects.all().count(), 0)
        prototypes.VotePrototype.create(self.account2, self.bill, relations.VOTE_TYPE.AGAINST)
        prototypes.VotePrototype.create(self.account3, self.bill, relations.VOTE_TYPE.FOR)
        noun = game_names.generator().get_test_name('r-building-name')
        data = self.bill.user_form_initials
        data.update(linguistics_helpers.get_word_post_data(noun, prefix='name'))
        data['approved'] = True
        form = self.bill.data.get_moderator_form_update(data)
        self.assertTrue(form.is_valid())
        self.bill.update_by_moderator(form, self.account1)
        self.assertTrue(self.bill.apply())
        bill = prototypes.BillPrototype.get_by_id(self.bill.id)
        self.assertTrue(bill.state.is_ACCEPTED)
        self.assertEqual(places_models.Building.objects.all().count(), 1)
        building = places_storage.buildings.all()[0]
        self.assertEqual(building.person.id, self.person_1.id)
        self.assertEqual(building.place.id, self.place1.id)
        self.assertEqual(building.x, self.accepted_position_1[0])
        self.assertEqual(building.y, self.accepted_position_1[1])
        self.assertEqual(building.utg_name, noun)
    @mock.patch('the_tale.game.bills.conf.settings.MIN_VOTES_PERCENT', 0.6)
    @mock.patch('the_tale.game.bills.prototypes.BillPrototype.time_before_voting_end', datetime.timedelta(seconds=0))
    def test_duplicate_apply(self):
        """Applying the same bill twice must not create a second building or rename the first."""
        self.assertEqual(places_models.Building.objects.all().count(), 0)
        prototypes.VotePrototype.create(self.account2, self.bill, relations.VOTE_TYPE.AGAINST)
        prototypes.VotePrototype.create(self.account3, self.bill, relations.VOTE_TYPE.FOR)
        noun = game_names.generator().get_test_name('building-name')
        data = self.bill.user_form_initials
        data.update(linguistics_helpers.get_word_post_data(noun, prefix='name'))
        data['approved'] = True
        form = self.bill.data.get_moderator_form_update(data)
        self.assertTrue(form.is_valid())
        self.bill.update_by_moderator(form, self.account1)
        # Second copy of the bill with a different building name, forced back to VOTING.
        dup_noun = game_names.generator().get_test_name('dup-building-name')
        bill = prototypes.BillPrototype.get_by_id(self.bill.id)
        bill.state = relations.BILL_STATE.VOTING
        bill.save()
        data = bill.user_form_initials
        data.update(linguistics_helpers.get_word_post_data(dup_noun, prefix='name'))
        data['approved'] = True
        form = bill.data.get_moderator_form_update(data)
        self.assertTrue(form.is_valid())
        bill.update_by_moderator(form, self.account1)
        # apply first bill
        self.assertTrue(self.bill.apply())
        # apply second bill
        self.assertTrue(bill.apply())
        self.assertEqual(places_models.Building.objects.all().count(), 1)
        building = places_logic.load_building(places_models.Building.objects.all()[0].id)
        self.assertEqual(building.utg_name, noun)
        self.assertNotEqual(building.utg_name, dup_noun)
    @mock.patch('the_tale.game.bills.conf.settings.MIN_VOTES_PERCENT', 0.6)
    @mock.patch('the_tale.game.bills.prototypes.BillPrototype.time_before_voting_end', datetime.timedelta(seconds=0))
    def test_has_meaning__duplicate(self):
        """A re-voted bill whose building already exists must report has_meaning() == False."""
        self.assertEqual(places_models.Building.objects.all().count(), 0)
        prototypes.VotePrototype.create(self.account2, self.bill, relations.VOTE_TYPE.AGAINST)
        prototypes.VotePrototype.create(self.account3, self.bill, relations.VOTE_TYPE.FOR)
        noun = game_names.generator().get_test_name('building-name')
        data = self.bill.user_form_initials
        data.update(linguistics_helpers.get_word_post_data(noun, prefix='name'))
        data['approved'] = True
        form = self.bill.data.get_moderator_form_update(data)
        self.assertTrue(form.is_valid())
        self.bill.update_by_moderator(form, self.account1)
        self.assertTrue(self.bill.apply())
        # NOTE(review): `form` below is created but never used -- looks like
        # leftover code; confirm whether a validation assert was intended.
        form = bills.building_create.BuildingCreate.ModeratorForm(data)
        bill = prototypes.BillPrototype.get_by_id(self.bill.id)
        bill.state = relations.BILL_STATE.VOTING
        bill.save()
        self.assertFalse(bill.has_meaning())
    def test_has_meaning__wrong_position(self):
        self.bill.data.x = 1000
        self.assertFalse(self.bill.has_meaning())
| 45.205405
| 156
| 0.691618
|
acff146028b079db979b72e84c9e9a0719a42e16
| 2,307
|
py
|
Python
|
KissAnimeDownloader.py
|
Aditya8795/Python-Scripts
|
e2a66d5f864571c5e2ff18bac98f7a1ab96a21ee
|
[
"MIT"
] | 1
|
2015-10-18T13:32:37.000Z
|
2015-10-18T13:32:37.000Z
|
KissAnimeDownloader.py
|
Aditya8795/Python-Scripts
|
e2a66d5f864571c5e2ff18bac98f7a1ab96a21ee
|
[
"MIT"
] | null | null | null |
KissAnimeDownloader.py
|
Aditya8795/Python-Scripts
|
e2a66d5f864571c5e2ff18bac98f7a1ab96a21ee
|
[
"MIT"
] | null | null | null |
# KISSANIME - http://kissanime.com/ ANIME DOWNLOADER
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# NOTE(review): this script uses Python 2 syntax (print statements) and must be
# run under Python 2 with selenium and BeautifulSoup installed.
BASE_URL = "http://kissanime.com/Anime/"
DELAY = 10 # change it depending on your internet connectivity
# Collected episode page URLs and, per episode, the scraped download links.
episodeURLs = []
downloadURLs = []
#------------------------------- EDIT THIS AND ADD YOUR REQUIRED ANIME NAME
AnimeName = "Nodame-Cantabile"
#-------------------------------
URL = BASE_URL + AnimeName
print "Opening firefox Browser"
driver = webdriver.Firefox()
print "Navigating to Login Page"
driver.get("http://kissanime.com/Login")
# Fixed sleeps give the page (and its anti-bot challenge) time to load.
print "DELAY start"
time.sleep(DELAY)
print "DELAY end"
print "Logging in"
user = driver.find_element_by_name("username")
passwd = driver.find_element_by_name("password")
# Replace the placeholders below with real credentials before running.
user.send_keys("<ur username>")
passwd.send_keys("<ur password>")
passwd.send_keys(Keys.RETURN)
print "DELAY start"
time.sleep(DELAY)
print "DELAY end"
print "Navigating to anime episode page"
driver.get(URL)
print "DELAY start"
time.sleep(DELAY)
print "DELAY end"
html = driver.page_source
# NOTE(review): no parser argument -- bs4 picks the best available parser,
# which can differ between machines.
soup = BeautifulSoup(html)
epListTable = soup.find("table", {"class" : "listing"})
for row in epListTable.findAll('tr'):
    # each row is <td> tag enclosed
    try:
        episodeURLs.append("http://kissanime.com"+row.findAll('a')[0].get('href'))
    except IndexError:
        # Header/separator rows have no <a>; skip them.
        print "\n Obtaining episode URL's ....\n"
print "These are the episode URL's"
print episodeURLs
for url in episodeURLs:
    print "\n Navigating to get Video for the URL => "+url
    driver.get(url)
    print "DELAY start"
    time.sleep(DELAY)
    print "DELAY end"
    temp = []
    html = driver.page_source
    soup = BeautifulSoup(html)
    for div in soup.findAll('div', {"id" : "divDownload"}):
        links = div.findAll('a')
        for link in links:
            # url[url.find('?')-2:url.find('?')] presumably extracts a 2-char
            # episode id preceding '?' -- fragile if the URL format changes.
            dummy = (url[url.find('?')-2:url.find('?')], link.text.strip(), link.attrs['href'])
            temp.append(dummy)
            print "\n\n Temp for"+link.text.strip()
            print temp
    downloadURLs.append(temp)
for link in downloadURLs:
    print link
    print "\n"
# NOTE(review): "tezt" is a typo for "text" in the printed message.
print "Copy paste the above links to a text file, use import from tezt file option in IDM to download all"
| 23.07
| 106
| 0.648895
|
acff16245aeba34df67b0b0a242e602f8b0f4900
| 2,289
|
py
|
Python
|
skl2onnx/operator_converters/voting_regressor.py
|
Alexsandruss/sklearn-onnx
|
b612557615df439e471867a676c9eca8ae4a787c
|
[
"Apache-2.0"
] | 1
|
2021-06-11T22:08:57.000Z
|
2021-06-11T22:08:57.000Z
|
skl2onnx/operator_converters/voting_regressor.py
|
ogrisel/sklearn-onnx
|
0afbe295aa3f1abbcea60f582faac31d16bd3ab0
|
[
"Apache-2.0"
] | null | null | null |
skl2onnx/operator_converters/voting_regressor.py
|
ogrisel/sklearn-onnx
|
0afbe295aa3f1abbcea60f582faac31d16bd3ab0
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
from ..common._registration import register_converter
from ..common._apply_operation import apply_mul
from ..common.data_types import (
guess_proto_type, FloatTensorType, DoubleTensorType)
from .._supported_operators import sklearn_operator_name_map
def convert_voting_regressor(scope, operator, container):
    """
    Converts a *VotingRegressor* into *ONNX* format.

    The ensemble prediction is rebuilt as a weighted sum: every underlying
    estimator is converted as its own sub-graph, its output is multiplied by
    its (normalised) weight, flattened, and all partial results are combined
    with a single ONNX ``Sum`` node.
    """
    op = operator.raw_operator
    # Sub-estimators expect float/double tensors; any other input type is
    # first routed through a cast operator.
    if not isinstance(operator.inputs[0].type,
                      (FloatTensorType, DoubleTensorType)):
        this_operator = scope.declare_local_operator('SklearnCast')
        this_operator.inputs = operator.inputs
        var_name = scope.declare_local_variable('cast', FloatTensorType())
        this_operator.outputs.append(var_name)
        inputs = this_operator.outputs
    else:
        inputs = operator.inputs
    vars_names = []
    for i, estimator in enumerate(op.estimators_):
        # presumably a placeholder for dropped estimators — TODO confirm
        # whether estimators_ can actually contain None here.
        if estimator is None:
            continue
        # Declare a sub-operator for the estimator; its converter is looked
        # up by sklearn type.
        op_type = sklearn_operator_name_map[type(estimator)]
        this_operator = scope.declare_local_operator(op_type, estimator)
        this_operator.inputs = inputs
        var_name = scope.declare_local_variable(
            'var_%d' % i, inputs[0].type.__class__())
        this_operator.outputs.append(var_name)
        var_name = var_name.onnx_name
        # Normalise user-provided weights so they sum to 1; without weights
        # every estimator contributes uniformly.
        if op.weights is not None:
            val = op.weights[i] / op.weights.sum()
        else:
            val = 1. / len(op.estimators_)
        weights_name = scope.get_unique_variable_name('w%d' % i)
        proto_dtype = guess_proto_type(inputs[0].type)
        container.add_initializer(
            weights_name, proto_dtype, [1], [val])
        wvar_name = scope.get_unique_variable_name('wvar_%d' % i)
        # Weighted prediction of this estimator (scalar broadcast multiply).
        apply_mul(scope, [var_name, weights_name],
                  wvar_name, container, broadcast=1)
        flat_name = scope.get_unique_variable_name('fvar_%d' % i)
        # Flatten so every partial result has the same 2-D shape before Sum.
        container.add_node('Flatten', wvar_name, flat_name)
        vars_names.append(flat_name)
    # Final ensemble output: element-wise sum of all weighted predictions.
    container.add_node('Sum', vars_names,
                       operator.outputs[0].full_name,
                       name=scope.get_unique_operator_name('Sum'))
register_converter('SklearnVotingRegressor', convert_voting_regressor)
| 35.215385
| 74
| 0.670599
|
acff16f2877acb427fadf36b74155d1b85f707e5
| 1,383
|
py
|
Python
|
snippets.py
|
ThinkDownstairs/coder
|
7a42a9bac941039b96ccf2430e560cc60e2159df
|
[
"WTFPL"
] | 1
|
2018-03-20T06:01:17.000Z
|
2018-03-20T06:01:17.000Z
|
snippets.py
|
ThinkDownstairs/coder
|
7a42a9bac941039b96ccf2430e560cc60e2159df
|
[
"WTFPL"
] | 19
|
2018-03-20T23:11:38.000Z
|
2018-04-01T17:39:10.000Z
|
snippets.py
|
ThinkDownstairs/coder
|
7a42a9bac941039b96ccf2430e560cc60e2159df
|
[
"WTFPL"
] | null | null | null |
import pygame
import animations
import status
import game_objects
import consts
import color
import random
from typing import Tuple, List
class Snippet(game_objects.GameObject):
    """A code-snippet sprite that spawns at the bottom of the screen and
    scrolls upward; the player is penalised if it escapes off the top."""

    # Pool of pre-rendered snippet surfaces to pick from at spawn time.
    _SURFACE_POOL = (
        animations.Surfaces.FIBONACCI,
        animations.Surfaces.FOR_I_IN_RANGE,
        animations.Surfaces.FOR_ITEM_IN_ITEMS,
        animations.Surfaces.IMPORT_PYGAME,
        animations.Surfaces.PRINT_HELLO_WORLD,
        animations.Surfaces.REVERSE,
        animations.Surfaces.SQR_LAMBDA,
        animations.Surfaces.STR_JOIN,
        animations.Surfaces.XY_POINT,
    )

    def __init__(self, status_: status.Status, mouse_pos_x: int) -> None:
        super().__init__()
        self._animation = animations.Snippet(random.choice(self._SURFACE_POOL))
        surface = self._animation.surface
        # Centre horizontally on the spawn x-coordinate; start just below
        # the bottom edge of the screen.
        self._x = mouse_pos_x - surface.get_width() // 2
        self._y = consts.SCREEN_H
        self._h = surface.get_height()
        self._status = status_

    def render(self, screen):
        """Draw the snippet at its current position."""
        self._animation.render(screen, self._x, self._y)

    def update(self, delta) -> None:
        """Advance the animation and move upward; penalise once fully off-screen."""
        self._animation.update(delta)
        if self._y < -self._h:
            # Escaped past the top edge: despawn and deduct points that
            # grow quadratically with the current level.
            self.delete()
            self._status.inc_point(int(((self._status.level + 1) * 2)**2) * -1)
        self._y -= delta * consts.SNIPPET_SPEED

    def _get_surface(self) -> pygame.Surface:
        return self._animation.surface
| 33.731707
| 79
| 0.656544
|
acff1708fd714b8158db309bc50e6cb97409ad06
| 2,161
|
py
|
Python
|
ideaseed/authentication.py
|
ewen-lbh/ideaseed
|
ee808c16d4cf27d2b337c45d3bebb257af748eff
|
[
"MIT"
] | 2
|
2021-05-09T19:40:44.000Z
|
2021-05-16T02:52:35.000Z
|
ideaseed/authentication.py
|
ewen-lbh/ideaseed
|
ee808c16d4cf27d2b337c45d3bebb257af748eff
|
[
"MIT"
] | 202
|
2020-06-16T20:22:13.000Z
|
2021-12-05T17:34:41.000Z
|
ideaseed/authentication.py
|
ewen-lbh/ideaseed
|
ee808c16d4cf27d2b337c45d3bebb257af748eff
|
[
"MIT"
] | null | null | null |
import json
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Tuple, TypeVar
from rich import print
T = TypeVar("T")
class Cache:
    """JSON-file-backed login cache, namespaced per service.

    The file stores one JSON object whose top-level keys are service names;
    ``self.cache`` holds this service's section after ``read()``.
    """

    def __init__(self, path: Path, service: str):
        if not isinstance(path, Path):
            raise TypeError("Please use a Path for the `path` argument.")
        self.path = path
        self.service = service
        self.create()
        self.cache = self.read()

    def read(self) -> dict[str, Any]:
        """Load this service's section of the cache file into ``self.cache``."""
        with open(self.path, "r") as file:
            self.cache = json.load(file).get(self.service, {})
        return self.cache

    def create(self):
        """Ensure the cache file exists and contains at least an empty JSON object."""
        self.path.parent.mkdir(parents=True, exist_ok=True)
        if not (self.path.exists() and self.path.read_text() != ""):
            self.path.write_text("{}")

    @contextmanager
    def modify(self):
        """
        Context manager that reads from the cache file (into `self.cache`), executes the given function, and writes back the result (from `self.cache`).
        """
        with open(self.path, "r") as file:
            self.cache = json.load(file)
        yield
        with open(self.path, "w") as file:
            json.dump(self.cache, file)

    def write(self, data: dict[str, Any]):
        """Replace this service's section of the cache file with *data*."""
        with self.modify():
            self.cache |= {self.service: data}

    def clear(self):
        """Remove this service's section from the cache file, if present."""
        print(f"[black on yellow]Clearing [bold]{self.service}[/bold] cache...")
        with self.modify():
            # pop() instead of del: clearing an absent (or already-cleared)
            # section must be a no-op, not a KeyError.
            self.cache.pop(self.service, None)

    def clear_all(self):
        """Delete the whole cache file (all services)."""
        self.path.unlink(missing_ok=True)

    def login(self) -> Any:
        """Log in from cache when possible, otherwise manually (and cache it)."""
        print(f"[dim]Logging into [blue bold]{self.service}[/]...")
        if self.cache:
            return self.login_from_cache()
        loggedin, cache_data = self.login_manually()
        self.write(cache_data)
        print("[dim]Logged in.")
        return loggedin

    def login_manually(self, **params) -> Tuple[Any, dict[str, Any]]:
        """Subclass hook: return (logged-in client, data to cache)."""
        raise NotImplementedError("Please implement login_manually in your subclass.")

    def login_from_cache(self) -> Any:
        """Subclass hook: rebuild a logged-in client from ``self.cache``."""
        raise NotImplementedError("Please implement login_from_cache in your subclass.")
| 30.871429
| 152
| 0.611291
|
acff17205714c38265f2c66355fdfec71ec4184d
| 5,603
|
py
|
Python
|
sdk/python/pulumi_azure_native/dbformysql/v20180601/get_private_endpoint_connection.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/dbformysql/v20180601/get_private_endpoint_connection.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/dbformysql/v20180601/get_private_endpoint_connection.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionResult',
'AwaitableGetPrivateEndpointConnectionResult',
'get_private_endpoint_connection',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
    """
    A private endpoint connection
    """
    # NOTE(review): this file is generated by the Pulumi SDK Generator
    # (see header) — keep any changes documentation-only and prefer
    # regenerating over hand-editing.
    def __init__(__self__, id=None, name=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, type=None):
        # Validate each raw invoke-output value and store it on the typed
        # result object via pulumi.set.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if private_endpoint and not isinstance(private_endpoint, dict):
            raise TypeError("Expected argument 'private_endpoint' to be a dict")
        pulumi.set(__self__, "private_endpoint", private_endpoint)
        if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
            raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
        pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> Optional['outputs.PrivateEndpointPropertyResponse']:
        """
        Private endpoint which the connection belongs to.
        """
        return pulumi.get(self, "private_endpoint")
    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> Optional['outputs.PrivateLinkServiceConnectionStatePropertyResponse']:
        """
        Connection state of the private endpoint connection.
        """
        return pulumi.get(self, "private_link_service_connection_state")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        State of the private endpoint connection.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
    """Awaitable wrapper so the invoke result can also be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns __await__ into a generator function
        # (as the protocol requires) without ever suspending; the result is
        # produced immediately.
        if False:
            yield self
        return GetPrivateEndpointConnectionResult(
            id=self.id,
            name=self.name,
            private_endpoint=self.private_endpoint,
            private_link_service_connection_state=self.private_link_service_connection_state,
            provisioning_state=self.provisioning_state,
            type=self.type)
def get_private_endpoint_connection(private_endpoint_connection_name: Optional[str] = None,
                                    resource_group_name: Optional[str] = None,
                                    server_name: Optional[str] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
    """
    A private endpoint connection

    :param str private_endpoint_connection_name: The name of the private endpoint connection.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str server_name: The name of the server.
    """
    # Build the invoke argument map expected by the provider (camelCase keys).
    __args__ = dict()
    __args__['privateEndpointConnectionName'] = private_endpoint_connection_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['serverName'] = server_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Provider invoke; the Awaitable subclass lets callers `await` the result.
    __ret__ = pulumi.runtime.invoke('azure-native:dbformysql/v20180601:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
    return AwaitableGetPrivateEndpointConnectionResult(
        id=__ret__.id,
        name=__ret__.name,
        private_endpoint=__ret__.private_endpoint,
        private_link_service_connection_state=__ret__.private_link_service_connection_state,
        provisioning_state=__ret__.provisioning_state,
        type=__ret__.type)
| 41.503704
| 193
| 0.695699
|
acff180bde04673a7b76b5a711220cfe8b28ed13
| 3,638
|
py
|
Python
|
models/ro - Copy.py
|
ErikHumphrey/sustain-seq2seq
|
c4787f0ca1047d01385e4fa4ffde59c6a8ab4cc4
|
[
"Apache-2.0"
] | 4
|
2019-05-09T19:47:48.000Z
|
2020-04-11T13:58:31.000Z
|
models/ro - Copy.py
|
ErikHumphrey/sustain-seq2seq
|
c4787f0ca1047d01385e4fa4ffde59c6a8ab4cc4
|
[
"Apache-2.0"
] | null | null | null |
models/ro - Copy.py
|
ErikHumphrey/sustain-seq2seq
|
c4787f0ca1047d01385e4fa4ffde59c6a8ab4cc4
|
[
"Apache-2.0"
] | 4
|
2018-12-05T01:52:22.000Z
|
2019-11-01T01:01:52.000Z
|
import torch
import torch.nn.functional as F
import numpy as np
from pytorch_transformers import GPT2LMHeadModel, GPT2Tokenizer
from tqdm import trange
import logging
logging.basicConfig(level=logging.INFO)
# Load the pretrained GPT-2 tokenizer and LM-head model (downloads weights on
# first use) and switch to inference mode so dropout does not perturb sampling.
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
model.eval()
# Smoke test: encode a sentence and run one forward pass.
print(tokenizer.encode("Hello, my dog is cute"))
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
print(input_ids)
# Passing labels=input_ids makes the model also return the LM loss.
outputs = model(input_ids, labels=input_ids)
loss, logits = outputs[:2]
print(loss)
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """Mask a 1-D logits vector in place for top-k and/or nucleus (top-p) sampling.

    Args:
        logits: logits distribution shape (vocabulary size); modified in place.
        top_k > 0: keep only the k tokens with the highest logits.
        top_p > 0.0: keep the smallest set of tokens whose cumulative
            probability reaches top_p (nucleus filtering, Holtzman et al.,
            http://arxiv.org/abs/1904.09751).
    Returns:
        The same tensor, with filtered-out entries set to ``filter_value``.
    From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
    """
    assert logits.dim() == 1  # batch size 1 for now - could be updated for more but the code would be less clear
    top_k = min(top_k, logits.size(-1))  # never ask topk for more than we have
    if top_k > 0:
        # Everything strictly below the k-th best logit is filtered out.
        kth_best = torch.topk(logits, top_k)[0][..., -1, None]
        logits[logits < kth_best] = filter_value
    if top_p > 0.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # Tokens past the nucleus (cumulative probability above top_p) go.
        drop_mask = cum_probs > top_p
        # Shift right so the first token that crosses the threshold is kept.
        drop_mask[..., 1:] = drop_mask[..., :-1].clone()
        drop_mask[..., 0] = 0
        logits[sorted_indices[drop_mask]] = filter_value
    return logits
def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=0, top_p=0.0, device='cpu'):
    """Autoregressively sample `length` tokens from `model`, seeded with `context`.

    Returns a (num_samples, len(context) + length) LongTensor of token ids.
    NOTE(review): only row 0 of the logits is sampled each step, so with
    num_samples > 1 every row receives the same continuation — confirm intent.
    """
    generated = torch.tensor(context, dtype=torch.long, device=device)
    generated = generated.unsqueeze(0).repeat(num_samples, 1)
    with torch.no_grad():
        for _ in trange(length):
            model_out = model(input_ids=generated)
            # Logits of the last position, scaled by temperature.
            scaled = model_out[0][0, -1, :] / temperature
            filtered = top_k_top_p_filtering(scaled, top_k=top_k, top_p=top_p)
            next_tok = torch.multinomial(F.softmax(filtered, dim=-1), num_samples=1)
            generated = torch.cat((generated, next_tok.unsqueeze(0)), dim=1)
    return generated
# Demo driver: repeatedly sample a 200-token continuation for a fixed prompt.
# NOTE(review): the loop never terminates and the prompt never changes —
# presumably leftover from an interactive `raw_text = input(...)` loop; confirm.
while True:
    raw_text = "The company has a weak policy addressing bribery and corruption."
    context_tokens = tokenizer.encode(raw_text)
    out = sample_sequence(
        model=model,
        context=context_tokens,
        length=200,
        temperature=1.,
        top_k=0,
        top_p=0.9,
        device=torch.device("cpu")
    )
    # Drop the prompt tokens; decode only the newly generated continuation.
    out = out[0, len(context_tokens):].tolist()
    text = tokenizer.decode(out, clean_up_tokenization_spaces=True)
    print(text)
    print("_"*20)
| 42.8
| 122
| 0.684992
|
acff183310c2de8404a8218e211ef28c46fb7985
| 4,393
|
py
|
Python
|
test/functional/merkle_blocks.py
|
kevinjp2000/wificoin
|
9eb95358cf612c76fd55b47fed3b824051344be8
|
[
"MIT"
] | 98
|
2018-03-26T02:51:08.000Z
|
2022-02-13T23:58:46.000Z
|
test/functional/merkle_blocks.py
|
kevinjp2000/wificoin
|
9eb95358cf612c76fd55b47fed3b824051344be8
|
[
"MIT"
] | 2
|
2018-05-16T05:47:04.000Z
|
2018-06-04T05:12:37.000Z
|
test/functional/merkle_blocks.py
|
kevinjp2000/wificoin
|
9eb95358cf612c76fd55b47fed3b824051344be8
|
[
"MIT"
] | 23
|
2018-04-02T14:36:07.000Z
|
2021-01-08T18:09:15.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The WiFicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test gettxoutproof and verifytxoutproof RPCs."""
from test_framework.test_framework import WiFicoinTestFramework
from test_framework.util import *
class MerkleBlockTest(WiFicoinTestFramework):
    """Exercise gettxoutproof/verifytxoutproof against unconfirmed, spent,
    unspent and txindex-backed transactions."""
    def set_test_params(self):
        self.num_nodes = 4
        self.setup_clean_chain = True
        # Nodes 0/1 are "wallet" nodes, Nodes 2/3 are used for testing
        self.extra_args = [[], [], [], ["-txindex"]]
    def setup_network(self):
        # Star topology around node 0 so blocks and transactions relay everywhere.
        self.setup_nodes()
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[0], 3)
        self.sync_all()
    def run_test(self):
        self.log.info("Mining blocks...")
        self.nodes[0].generate(105)
        self.sync_all()
        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)
        # Spend two of node 0's mature coinbase outputs to node 1 (unconfirmed).
        node0utxos = self.nodes[0].listunspent(1)
        tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
        txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"])
        tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
        txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"])
        # This will raise an exception because the transaction is not yet in a block
        assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].gettxoutproof, [txid1])
        self.nodes[0].generate(1)
        blockhash = self.nodes[0].getblockhash(chain_height + 1)
        self.sync_all()
        txlist = []
        blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
        # Index 0 is the coinbase; the two txs above land at indexes 1 and 2.
        txlist.append(blocktxn[1])
        txlist.append(blocktxn[2])
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1])
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)
        # Respend one of the two outputs so exactly one of txid1/txid2 becomes fully spent.
        txin_spent = self.nodes[1].listunspent(1).pop()
        tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 49.98})
        txid3 = self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"])
        self.nodes[0].generate(1)
        self.sync_all()
        txid_spent = txin_spent["txid"]
        txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2
        # We can't find the block from a fully-spent tx
        assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[2].gettxoutproof, [txid_spent])
        # We can get the proof if we specify the block
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
        # We can't get the proof if we specify a non-existent block
        assert_raises_rpc_error(-5, "Block not found", self.nodes[2].gettxoutproof, [txid_spent], "00000000000000000000000000000000")
        # We can get the proof if the transaction is unspent
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
        # We can get the proof if we provide a list of transactions and one of them is unspent. The ordering of the list should not matter.
        assert_equal(sorted(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2]))), sorted(txlist))
        assert_equal(sorted(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1]))), sorted(txlist))
        # We can always get a proof if we have a -txindex
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
        # We can't get a proof if we specify transactions from different blocks
        assert_raises_rpc_error(-5, "Not all transactions found in specified or retrieved block", self.nodes[2].gettxoutproof, [txid1, txid3])
if __name__ == '__main__':
MerkleBlockTest().main()
| 52.297619
| 142
| 0.686092
|
acff189a2aca4fb0f35b5584ea46643cfd53bd0f
| 19,220
|
py
|
Python
|
manim/animation/animation.py
|
phoreverphoenix/manim
|
ef00ae5e668907eac2ae18c19c35e78671ccfcc2
|
[
"MIT"
] | 1
|
2022-03-10T18:42:04.000Z
|
2022-03-10T18:42:04.000Z
|
manim/animation/animation.py
|
ali-alnamr/manim
|
8e6a4c48aca882e526558fd9f9e5419a8f36b876
|
[
"MIT"
] | null | null | null |
manim/animation/animation.py
|
ali-alnamr/manim
|
8e6a4c48aca882e526558fd9f9e5419a8f36b876
|
[
"MIT"
] | null | null | null |
"""Animate mobjects."""
from __future__ import annotations
from manim.mobject.opengl.opengl_mobject import OpenGLMobject
from .. import config, logger
from ..mobject import mobject
from ..mobject.mobject import Mobject
from ..mobject.opengl import opengl_mobject
from ..utils.rate_functions import smooth
__all__ = ["Animation", "Wait", "override_animation"]
from copy import deepcopy
from typing import TYPE_CHECKING, Callable, Iterable, Sequence
if TYPE_CHECKING:
from manim.scene.scene import Scene
DEFAULT_ANIMATION_RUN_TIME: float = 1.0
DEFAULT_ANIMATION_LAG_RATIO: float = 0.0
class Animation:
    """An animation.

    Animations have a fixed time span.

    Parameters
    ----------
    mobject
        The mobject to be animated. This is not required for all types of animations.
    lag_ratio
        Defines the delay after which the animation is applied to submobjects. This lag
        is relative to the duration of the animation.

        This does not influence the total
        runtime of the animation. Instead the runtime of individual animations is
        adjusted so that the complete animation has the defined run time.
    run_time
        The duration of the animation in seconds.
    rate_func
        The function defining the animation progress based on the relative runtime (see :mod:`~.rate_functions`).

        For example ``rate_func(0.5)`` is the proportion of the animation that is done
        after half of the animations run time.
    name
        The name of the animation. This gets displayed while rendering the animation.
        Defaults to <class-name>(<Mobject-name>).
    remover
        Whether the given mobject should be removed from the scene after this animation.
    suspend_mobject_updating
        Whether updaters of the mobject should be suspended during the animation.

    .. NOTE::

        In the current implementation of this class, the specified rate function is applied
        within :meth:`.Animation.interpolate_mobject` call as part of the call to
        :meth:`.Animation.interpolate_submobject`. For subclasses of :class:`.Animation`
        that are implemented by overriding :meth:`interpolate_mobject`, the rate function
        has to be applied manually (e.g., by passing ``self.rate_func(alpha)`` instead
        of just ``alpha``).

    Examples
    --------

    .. manim:: LagRatios

        class LagRatios(Scene):
            def construct(self):
                ratios = [0, 0.1, 0.5, 1, 2]  # demonstrated lag_ratios

                # Create dot groups
                group = VGroup(*[Dot() for _ in range(4)]).arrange_submobjects()
                groups = VGroup(*[group.copy() for _ in ratios]).arrange_submobjects(buff=1)
                self.add(groups)

                # Label groups
                self.add(Text("lag_ratio = ", font_size=36).next_to(groups, UP, buff=1.5))
                for group, ratio in zip(groups, ratios):
                    self.add(Text(str(ratio), font_size=36).next_to(group, UP))

                # Animate groups with different lag_ratios
                self.play(AnimationGroup(*[
                    group.animate(lag_ratio=ratio, run_time=1.5).shift(DOWN * 2)
                    for group, ratio in zip(groups, ratios)
                ]))

                # lag_ratio also works recursively on nested submobjects:
                self.play(groups.animate(run_time=1, lag_ratio=0.1).shift(UP * 2))
    """

    def __new__(
        cls,
        mobject=None,
        *args,
        use_override=True,
        **kwargs,
    ):
        # A mobject class may register its own override for this animation
        # type; if so, return the overriding animation instead of cls.
        if isinstance(mobject, Mobject) and use_override:
            func = mobject.animation_override_for(cls)
            if func is not None:
                anim = func(mobject, *args, **kwargs)
                logger.debug(
                    f"The {cls.__name__} animation has been overridden for "
                    f"{type(mobject).__name__} mobjects. use_override=False can "
                    f"be used as a keyword argument to prevent animation overriding.",
                )
                return anim
        return super().__new__(cls)

    def __init__(
        self,
        mobject: Mobject | None,
        lag_ratio: float = DEFAULT_ANIMATION_LAG_RATIO,
        run_time: float = DEFAULT_ANIMATION_RUN_TIME,
        rate_func: Callable[[float], float] = smooth,
        name: str | None = None,
        remover: bool = False,  # remove a mobject from the screen?
        suspend_mobject_updating: bool = True,
        introducer: bool = False,
        *,
        _on_finish: Callable[[Scene], None] = lambda _: None,
        **kwargs,
    ) -> None:
        self._typecheck_input(mobject)
        self.run_time: float = run_time
        self.rate_func: Callable[[float], float] = rate_func
        self.name: str | None = name
        self.remover: bool = remover
        self.introducer: bool = introducer
        self.suspend_mobject_updating: bool = suspend_mobject_updating
        self.lag_ratio: float = lag_ratio
        self._on_finish: Callable[[Scene], None] = _on_finish
        if config["renderer"] == "opengl":
            self.starting_mobject: OpenGLMobject = OpenGLMobject()
            self.mobject: OpenGLMobject = (
                mobject if mobject is not None else OpenGLMobject()
            )
        else:
            self.starting_mobject: Mobject = Mobject()
            self.mobject: Mobject = mobject if mobject is not None else Mobject()
        if kwargs:
            logger.debug("Animation received extra kwargs: %s", kwargs)
        if hasattr(self, "CONFIG"):
            # Pass a single string: logger.error with a tuple argument would
            # log the tuple's repr instead of the message.
            logger.error(
                "CONFIG has been removed from ManimCommunity. "
                "Please use keyword arguments instead.",
            )

    def _typecheck_input(self, mobject: Mobject | None) -> None:
        if mobject is None:
            logger.debug("Animation with empty mobject")
        elif not isinstance(mobject, (Mobject, OpenGLMobject)):
            raise TypeError("Animation only works on Mobjects")

    def __str__(self) -> str:
        if self.name:
            return self.name
        return f"{self.__class__.__name__}({str(self.mobject)})"

    def __repr__(self) -> str:
        return str(self)

    def begin(self) -> None:
        """Begin the animation.

        This method is called right as an animation is being played. As much
        initialization as possible, especially any mobject copying, should live in this
        method.
        """
        self.starting_mobject = self.create_starting_mobject()
        if self.suspend_mobject_updating:
            # All calls to self.mobject's internal updaters
            # during the animation, either from this Animation
            # or from the surrounding scene, should do nothing.
            # It is, however, okay and desirable to call
            # the internal updaters of self.starting_mobject,
            # or any others among self.get_all_mobjects()
            self.mobject.suspend_updating()
        self.interpolate(0)

    def finish(self) -> None:
        # TODO: begin and finish should require a scene as parameter.
        # That way Animation.clean_up_from_screen and Scene.add_mobjects_from_animations
        # could be removed as they fulfill basically the same purpose.
        """Finish the animation.

        This method gets called when the animation is over.
        """
        self.interpolate(1)
        if self.suspend_mobject_updating and self.mobject is not None:
            self.mobject.resume_updating()

    def clean_up_from_scene(self, scene: Scene) -> None:
        """Clean up the :class:`~.Scene` after finishing the animation.

        This includes to :meth:`~.Scene.remove` the Animation's
        :class:`~.Mobject` if the animation is a remover.

        Parameters
        ----------
        scene
            The scene the animation should be cleaned up from.
        """
        self._on_finish(scene)
        if self.is_remover():
            scene.remove(self.mobject)

    def _setup_scene(self, scene: Scene) -> None:
        """Setup up the :class:`~.Scene` before starting the animation.

        This includes to :meth:`~.Scene.add` the Animation's
        :class:`~.Mobject` if the animation is an introducer.

        Parameters
        ----------
        scene
            The scene the animation should be cleaned up from.
        """
        if scene is None:
            return
        if (
            self.is_introducer()
            and self.mobject not in scene.get_mobject_family_members()
        ):
            scene.add(self.mobject)

    def create_starting_mobject(self) -> Mobject:
        # Keep track of where the mobject starts
        return self.mobject.copy()

    def get_all_mobjects(self) -> Sequence[Mobject]:
        """Get all mobjects involved in the animation.

        Ordering must match the ordering of arguments to interpolate_submobject

        Returns
        -------
        Sequence[Mobject]
            The sequence of mobjects.
        """
        return self.mobject, self.starting_mobject

    def get_all_families_zipped(self) -> Iterable[tuple]:
        if config["renderer"] == "opengl":
            return zip(*(mob.get_family() for mob in self.get_all_mobjects()))
        return zip(
            *(mob.family_members_with_points() for mob in self.get_all_mobjects())
        )

    def update_mobjects(self, dt: float) -> None:
        """
        Updates things like starting_mobject, and (for
        Transforms) target_mobject.  Note, since typically
        (always?) self.mobject will have its updating
        suspended during the animation, this will do
        nothing to self.mobject.
        """
        for mob in self.get_all_mobjects_to_update():
            mob.update(dt)

    def get_all_mobjects_to_update(self) -> list[Mobject]:
        """Get all mobjects to be updated during the animation.

        Returns
        -------
        List[Mobject]
            The list of mobjects to be updated during the animation.
        """
        # The surrounding scene typically handles
        # updating of self.mobject. Besides, in
        # most cases its updating is suspended anyway
        return list(filter(lambda m: m is not self.mobject, self.get_all_mobjects()))

    def copy(self) -> Animation:
        """Create a copy of the animation.

        Returns
        -------
        Animation
            A copy of ``self``
        """
        return deepcopy(self)

    # Methods for interpolation, the meat of an Animation
    # TODO: stop using alpha as parameter name in different meanings.
    def interpolate(self, alpha: float) -> None:
        """Set the animation progress.

        This method gets called for every frame during an animation.

        Parameters
        ----------
        alpha
            The relative time to set the animation to, 0 meaning the start, 1 meaning
            the end.
        """
        self.interpolate_mobject(alpha)

    def interpolate_mobject(self, alpha: float) -> None:
        """Interpolates the mobject of the :class:`Animation` based on alpha value.

        Parameters
        ----------
        alpha
            A float between 0 and 1 expressing the ratio to which the animation
            is completed. For example, alpha-values of 0, 0.5, and 1 correspond
            to the animation being completed 0%, 50%, and 100%, respectively.
        """
        families = list(self.get_all_families_zipped())
        for i, mobs in enumerate(families):
            sub_alpha = self.get_sub_alpha(alpha, i, len(families))
            self.interpolate_submobject(*mobs, sub_alpha)

    def interpolate_submobject(
        self,
        submobject: Mobject,
        starting_submobject: Mobject,
        # target_copy: Mobject, #Todo: fix - signature of interpolate_submobject differs in Transform().
        alpha: float,
    ) -> Animation:
        # Typically implemented by subclass
        pass

    def get_sub_alpha(self, alpha: float, index: int, num_submobjects: int) -> float:
        """Get the animation progress of any submobjects subanimation.

        Parameters
        ----------
        alpha
            The overall animation progress
        index
            The index of the subanimation.
        num_submobjects
            The total count of subanimations.

        Returns
        -------
        float
            The progress of the subanimation.
        """
        # TODO, make this more understandable, and/or combine
        # its functionality with AnimationGroup's method
        # build_animations_with_timings
        lag_ratio = self.lag_ratio
        full_length = (num_submobjects - 1) * lag_ratio + 1
        value = alpha * full_length
        lower = index * lag_ratio
        return self.rate_func(value - lower)

    # Getters and setters
    def set_run_time(self, run_time: float) -> Animation:
        """Set the run time of the animation.

        Parameters
        ----------
        run_time
            The new time the animation should take in seconds.

        .. note::

            The run_time of an animation should not be changed while it is already
            running.

        Returns
        -------
        Animation
            ``self``
        """
        self.run_time = run_time
        return self

    def get_run_time(self) -> float:
        """Get the run time of the animation.

        Returns
        -------
        float
            The time the animation takes in seconds.
        """
        return self.run_time

    def set_rate_func(
        self,
        rate_func: Callable[[float], float],
    ) -> Animation:
        """Set the rate function of the animation.

        Parameters
        ----------
        rate_func
            The new function mapping relative animation runtime to progress.

        Returns
        -------
        Animation
            ``self``
        """
        self.rate_func = rate_func
        return self

    def get_rate_func(
        self,
    ) -> Callable[[float], float]:
        """Get the rate function of the animation.

        Returns
        -------
        Callable[[float], float]
            The rate function of the animation.
        """
        return self.rate_func

    def set_name(self, name: str) -> Animation:
        """Set the name of the animation.

        Parameters
        ----------
        name
            The new name of the animation.

        Returns
        -------
        Animation
            ``self``
        """
        self.name = name
        return self

    def is_remover(self) -> bool:
        """Test if the animation is a remover.

        Returns
        -------
        bool
            ``True`` if the animation is a remover, ``False`` otherwise.
        """
        return self.remover

    def is_introducer(self) -> bool:
        """Test if the animation is an introducer.

        Returns
        -------
        bool
            ``True`` if the animation is an introducer, ``False`` otherwise.
        """
        return self.introducer
def prepare_animation(
    anim: Animation | mobject._AnimationBuilder,
) -> Animation:
    r"""Return *anim* unchanged if it already is an :class:`Animation`;
    otherwise build the animation from a passed animation factory.

    Raises
    ------
    TypeError
        If *anim* is neither an animation nor an animation builder.

    Examples
    --------
    ::
        >>> from manim import Square, FadeIn
        >>> s = Square()
        >>> prepare_animation(FadeIn(s))
        FadeIn(Square)
    ::
        >>> prepare_animation(s.animate.scale(2).rotate(42))
        _MethodAnimation(Square)
    ::
        >>> prepare_animation(42)
        Traceback (most recent call last):
        ...
        TypeError: Object 42 cannot be converted to an animation
    """
    # Both the cairo and the OpenGL builder expose the same build() contract.
    builder_types = (mobject._AnimationBuilder, opengl_mobject._AnimationBuilder)
    if isinstance(anim, builder_types):
        return anim.build()
    if isinstance(anim, Animation):
        return anim
    raise TypeError(f"Object {anim} cannot be converted to an animation")
class Wait(Animation):
    """A "no operation" animation.

    Parameters
    ----------
    run_time
        The amount of time that should pass.
    stop_condition
        A function without positional arguments that evaluates to a boolean.
        The function is evaluated after every new frame has been rendered.
        Playing the animation only stops after the return value is truthy.
        Overrides the specified ``run_time``.
    frozen_frame
        Controls whether or not the wait animation is static, i.e., corresponds
        to a frozen frame. If ``False`` is passed, the render loop still
        progresses through the animation as usual and (among other things)
        continues to call updater functions. If ``None`` (the default value),
        the :meth:`.Scene.play` call tries to determine whether the Wait call
        can be static or not itself via :meth:`.Scene.should_mobjects_update`.
    kwargs
        Keyword arguments to be passed to the parent class, :class:`.Animation`.
    """
    def __init__(
        self,
        run_time: float = 1,
        stop_condition: Callable[[], bool] | None = None,
        frozen_frame: bool | None = None,
        **kwargs,
    ):
        # A frozen (static) wait cannot also poll a stop condition.
        if stop_condition and frozen_frame:
            raise ValueError("A static Wait animation cannot have a stop condition.")
        self.stop_condition = stop_condition
        self.is_static_wait: bool = frozen_frame
        self.duration: float = run_time
        super().__init__(None, run_time=run_time, **kwargs)
        # quick fix to work in opengl setting:
        self.mobject.shader_wrapper_list = []

    def begin(self) -> None:
        """A wait needs no setup."""

    def finish(self) -> None:
        """A wait needs no finalization."""

    def clean_up_from_scene(self, scene: Scene) -> None:
        """A wait leaves nothing in the scene to clean up."""

    def update_mobjects(self, dt: float) -> None:
        """A wait drives no mobject updates of its own."""

    def interpolate(self, alpha: float) -> None:
        """Waiting produces no visual change."""
def override_animation(
    animation_class: type[Animation],
) -> Callable[[Callable], Callable]:
    """Decorator used to mark methods as overrides for specific :class:`~.Animation` types.

    Should only be used to decorate methods of classes derived from :class:`~.Mobject`.
    ``Animation`` overrides get inherited to subclasses of the ``Mobject`` who defined
    them. They don't override subclasses of the ``Animation`` they override.

    See Also
    --------
    :meth:`~.Mobject.add_animation_override`

    Parameters
    ----------
    animation_class
        The animation to be overridden.

    Returns
    -------
    Callable[[Callable], Callable]
        The actual decorator, which tags the method as overriding the animation.

    Examples
    --------
    .. manim:: OverrideAnimationExample

        class MySquare(Square):
            @override_animation(FadeIn)
            def _fade_in_override(self, **kwargs):
                return Create(self, **kwargs)

        class OverrideAnimationExample(Scene):
            def construct(self):
                self.play(FadeIn(MySquare()))
    """
    def _mark_as_override(func):
        # This attribute name is the lookup contract consumed by
        # Mobject.add_animation_override — do not rename it.
        func._override_animation = animation_class
        return func
    return _mark_as_override
| 31.611842
| 115
| 0.603798
|
acff19c6988286d1e70c9e469174e8c17f659ab9
| 625
|
py
|
Python
|
bugs/issue-197-empty-selected-fnc-body/test.py
|
stepanek-m/retdec-regression-tests
|
12b834b14ede2826fec451368fa8192ab00ddadf
|
[
"MIT"
] | null | null | null |
bugs/issue-197-empty-selected-fnc-body/test.py
|
stepanek-m/retdec-regression-tests
|
12b834b14ede2826fec451368fa8192ab00ddadf
|
[
"MIT"
] | null | null | null |
bugs/issue-197-empty-selected-fnc-body/test.py
|
stepanek-m/retdec-regression-tests
|
12b834b14ede2826fec451368fa8192ab00ddadf
|
[
"MIT"
] | null | null | null |
import re
from regression_tests import *
class Test(Test):
    """Regression test for retdec issue 197: decompiling with
    ``--select-functions`` must still emit the selected function's body."""

    settings = TestSettings(
        input='arm-elf-09923a6e40662aab0ad2a1096f802f08',
        args='--select-functions=LzmaProps_Decode'
    )

    def test_selected_function(self):
        # Raw strings for the regex patterns: a plain-string '\(' is an
        # invalid escape sequence (SyntaxWarning on modern Python, error in
        # the future). The byte content of every pattern is unchanged.
        assert self.out_c.has_comment_matching(r'// Address range: 0x7148 - 0x71b4')
        assert self.out_c.has_just_funcs('LzmaProps_Decode')
        assert self.out_c.contains(r'a3 >= 4 != a3 != 4')
        assert self.out_c.contains(r'v[0-9]+ < .*&g[0-9]+')
        assert self.out_c.contains(r'v[0-9]+ >= 224 == \(v[0-9]+ != -32\)')
        assert self.out_c.contains(r'return 4')
| 34.722222
| 83
| 0.64
|
acff19d177697db747bcd9e187cc727e39ec3c87
| 1,921
|
py
|
Python
|
satchless/util/tests.py
|
bartels/satchless
|
4d333014333dc4fd5815f9e0bbea565959919a30
|
[
"BSD-4-Clause"
] | null | null | null |
satchless/util/tests.py
|
bartels/satchless
|
4d333014333dc4fd5815f9e0bbea565959919a30
|
[
"BSD-4-Clause"
] | null | null | null |
satchless/util/tests.py
|
bartels/satchless
|
4d333014333dc4fd5815f9e0bbea565959919a30
|
[
"BSD-4-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.test import TestCase, Client
class BaseTestCase(TestCase):
    """Test-case base that can temporarily override Django settings.

    ``_setup_settings`` applies a dict of overrides and returns the values it
    replaced; ``_teardown_settings`` restores those values and deletes any
    settings that did not exist beforehand. Note that ``_teardown_settings``
    mutates the ``custom_settings`` dict passed to it (restored keys are
    removed from it) — existing callers rely on this behavior.
    """

    def _setup_settings(self, custom_settings):
        # Remember the current value of every setting we are about to clobber.
        saved = {}
        for name, value in custom_settings.items():
            if hasattr(settings, name):
                saved[name] = getattr(settings, name)
            setattr(settings, name, value)
        return saved

    def _teardown_settings(self, original_settings, custom_settings=None):
        custom_settings = custom_settings or {}
        # Restore settings that existed before the override, dropping each
        # restored key from custom_settings as we go ...
        for name, value in original_settings.items():
            setattr(settings, name, value)
            if name in custom_settings:
                del custom_settings[name]
        # ... then delete the settings this test introduced from scratch.
        for name in list(custom_settings):
            delattr(settings, name)
class ViewsTestCase(BaseTestCase):
    """Helpers asserting the HTTP status code of GET/POST requests.

    ``_test_status`` issues one request via the Django test client and fails
    with a diagnostic message (including the response HTML) when the status
    code differs from the expected one.
    """

    def _test_status(self, url, method='get', *args, **kwargs):
        status_code = kwargs.pop('status_code', 200)
        client = kwargs.pop('client_instance', Client())
        data = kwargs.pop('data', {})
        response = getattr(client, method)(url, data=data, follow=False)
        # BUGFIX: the url may be a text string (always on Python 3), which has
        # no .decode(); only decode when it actually arrives as bytes.
        url_text = url.decode('utf-8') if isinstance(url, bytes) else url
        self.assertEqual(response.status_code, status_code,
            'Incorrect status code for: %s, (%s, %s)! Expected: %s, received: %s. HTML:\n\n%s' % (
                url_text, args, kwargs, status_code, response.status_code,
                response.content.decode('utf-8')))
        return response

    def _test_GET_status(self, url, *args, **kwargs):
        return self._test_status(url, 'get', *args, **kwargs)

    def _test_POST_status(self, url, *args, **kwargs):
        # POSTs are expected to redirect on success by default.
        kwargs['status_code'] = kwargs.get('status_code', 302)
        return self._test_status(url, 'post', *args, **kwargs)
| 43.659091
| 98
| 0.633004
|
acff19d1c74d97ba0a95ea147de47dc6aa952d99
| 3,095
|
py
|
Python
|
api-ref/source/conf.py
|
kud-aa/openstackdocstheme
|
5346718ddee29043adbf5a6b224ccc234b34ece1
|
[
"Apache-2.0"
] | 39
|
2015-02-06T07:32:30.000Z
|
2020-05-24T10:30:00.000Z
|
api-ref/source/conf.py
|
kud-aa/openstackdocstheme
|
5346718ddee29043adbf5a6b224ccc234b34ece1
|
[
"Apache-2.0"
] | null | null | null |
api-ref/source/conf.py
|
kud-aa/openstackdocstheme
|
5346718ddee29043adbf5a6b224ccc234b34ece1
|
[
"Apache-2.0"
] | 6
|
2015-03-02T15:14:32.000Z
|
2022-03-14T14:54:00.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Sphinx configuration for the test API reference build.
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'os_api_ref',
    'openstackdocstheme'
]
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# "project" contains the name of the book, such as
# 'security guide' or 'network guide'
# It's used by the "log-a-bug" button on each page
# and should ultimately be set automatically by the build process
project = u'Test API Reference'
copyright = u'2018, OpenStack Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# "version" and "release" are used by the "log-a-bug" feature
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# A few variables have to be set for the log-a-bug feature.
# These name the Launchpad project/tag that bug reports are filed against.
openstackdocs_bug_tag = "doc-builds"
openstackdocs_bug_project = 'openstack-doc-tools'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# To use the API Reference sidebar dropdown menu,
# uncomment the html_theme_options parameter. The theme
# variable, sidebar_dropdown, should be set to `api_ref`.
# Otherwise, the list of links for the User and Ops docs
# appear in the sidebar dropdown menu.
html_theme_options = {"sidebar_dropdown": "api_ref",
                      "display_badge": False,
                      "sidebar_mode": "toc"}
| 34.388889
| 78
| 0.713409
|
acff1a10e2aa1485712212295c43806d9c34644f
| 627
|
py
|
Python
|
loader/__init__.py
|
JasonkayZK/GitHubPoster
|
959520a9696cb06b0d4927fe1a4d48e09e7ca8ac
|
[
"MIT"
] | 1
|
2021-06-15T10:53:15.000Z
|
2021-06-15T10:53:15.000Z
|
loader/__init__.py
|
JasonkayZK/GitHubPoster
|
959520a9696cb06b0d4927fe1a4d48e09e7ca8ac
|
[
"MIT"
] | null | null | null |
loader/__init__.py
|
JasonkayZK/GitHubPoster
|
959520a9696cb06b0d4927fe1a4d48e09e7ca8ac
|
[
"MIT"
] | null | null | null |
from .bilibili_loader import BilibiliLoader
from .cichang_loader import CiChangLoader
from .duolingo_loader import DuolingoLoader
from .from_github_issue_loader import GitHubIssuesLoader
from .github_loader import GitHubLoader
from .gitlab_loader import GitLabLoader
from .gpx_loader import GPXLoader
from .kindle_loader import KindleLoader
from .leetcode_loader import LeetcodeLoader
from .ns_loader import NSLoader
from .shanbay_loader import ShanBayLoader
from .strava_loader import StravaLoader
from .twitter_loader import TwitterLoader
from .wakatime_loader import WakaTimeLoader
from .youtube_loader import YouTubeLoader
| 39.1875
| 56
| 0.880383
|
acff1c5124d8f7aee7cb92661feee12054f22dbc
| 9,132
|
py
|
Python
|
emlo-edit-php-helper/tweaker/tweak_add_works_people_places_manifestations_repository.py
|
culturesofknowledge/site-editor
|
9a74580d2567755ab068a2d8761df8f81718910e
|
[
"MIT"
] | null | null | null |
emlo-edit-php-helper/tweaker/tweak_add_works_people_places_manifestations_repository.py
|
culturesofknowledge/site-editor
|
9a74580d2567755ab068a2d8761df8f81718910e
|
[
"MIT"
] | null | null | null |
emlo-edit-php-helper/tweaker/tweak_add_works_people_places_manifestations_repository.py
|
culturesofknowledge/site-editor
|
9a74580d2567755ab068a2d8761df8f81718910e
|
[
"MIT"
] | 1
|
2021-11-15T13:19:28.000Z
|
2021-11-15T13:19:28.000Z
|
from __future__ import print_function
from tweaker.tweaker import DatabaseTweaker
from config import config
import sys
def main() :
    """Import Royal Society people, places, works and manifestations from CSV
    files into the EMLO database through a DatabaseTweaker.

    Asks on stdin (Python 2 ``raw_input``) whether the transaction should be
    committed at the end; the script runs either way and commits or rolls
    back via ``tweaker.commit_changes(do_commit)``.
    """
    do_commit = ( raw_input("Commit changes to database (y/n): ") == "y")
    if do_commit:
        print( "COMMITTING changes to database." )
    else:
        print( "NOT committing changes to database." )
    tweaker = DatabaseTweaker.tweaker_from_connection( config["dbname"], config["host"], config["port"], config["user"], config["password"] )
    # tweaker.set_debug(True)
    # Lookup tables built while loading: each entry maps the CSV value back to
    # the database id created for it.
    people = []
    places = []
    works = []
    original_catalogue = 'ELRS'
    repository_id = 135 # The Royal Society
    csv_rows = tweaker.get_csv_data( "resources/royal_society/people.csv" )
    # csv_rows = csv_rows[:5]
    count = countdown = len(csv_rows)
    # Create people
    # (first CSV row is the header, hence the skip_first_row dance)
    print( "Create People" )
    skip_first_row = True
    for csv_row in csv_rows:
        if skip_first_row:
            skip_first_row = False
            continue
        print( str(countdown) + " of " + str(count), ":", csv_row["primary_name"] )
        name_clean = standardise_name( csv_row["primary_name"])
        # "$RSEL " is the display form of the prefix used in the database.
        name_tweaked = name_clean.replace("RSEL_", "$RSEL ")
        person_id = tweaker.create_person_or_organisation(
            name_tweaked,
            editors_note=csv_row["editors_notes"]
        )
        # Names are stored lower-cased for the case-insensitive lookups below.
        people.append( {
            "id": person_id,
            "primary_name" : name_clean.lower()
        } )
        countdown -= 1
    csv_rows = tweaker.get_csv_data( "resources/royal_society/places.csv" )
    # csv_rows = csv_rows[:5]
    count = countdown = len(csv_rows)
    # Create Places
    #
    print( "Create Places" )
    skip_first_row = True
    for csv_row in csv_rows:
        if skip_first_row:
            skip_first_row = False
            continue
        print( str(countdown) + " of " + str(count), ":", csv_row["location_name"] )
        name_clean = standardise_name( csv_row["location_name"])
        name_tweaked = name_clean.replace("RSEL_", "$RSEL ")
        place_id = tweaker.create_location(
            element_4_eg_city=name_tweaked
        )
        places.append( {
            "id": place_id,
            "location_name" : name_clean.lower()
        } )
        countdown -= 1
    csv_rows = tweaker.get_csv_data( "resources/royal_society/works.csv" )
    # csv_rows = csv_rows[:5]
    count = countdown = len(csv_rows)
    # Create Works
    #
    print( "Create Works" )
    skip_first_row = True
    for cr in csv_rows:
        if skip_first_row:
            skip_first_row = False
            continue
        print( str(countdown) + " of " + str(count), ":", cr["iwork_id"] )
        languages = None
        if cr['language_id'] :
            languages = tweaker.get_languages_from_code( cr['language_id'] )
        work_id = tweaker.create_work(
            cr["iwork_id"],
            abstract=cr['abstract'],
            accession_code='The Royal Society, London, 9 November 2018',
            addressees_as_marked=cr['addressees_as_marked'],
            addressees_inferred=cr['addressees_inferred'],
            addressees_uncertain=cr['addressees_uncertain'],
            authors_as_marked=cr['authors_as_marked'],
            authors_inferred=cr['authors_inferred'],
            authors_uncertain=cr['authors_uncertain'],
            date_of_work2_std_day=cr['date_of_work2_std_day'],
            date_of_work2_std_month=cr['date_of_work2_std_month'],
            date_of_work2_std_year=cr['date_of_work2_std_year'],
            date_of_work_approx=cr['date_of_work_approx'],
            date_of_work_as_marked=cr['date_of_work_as_marked'],
            date_of_work_inferred=cr['date_of_work_inferred'],
            date_of_work_std_day=cr['date_of_work_std_day'],
            date_of_work_std_is_range=cr['date_of_work_std_is_range'],
            date_of_work_std_month=cr['date_of_work_std_month'],
            date_of_work_std_year=cr['date_of_work_std_year'],
            date_of_work_uncertain=cr['date_of_work_uncertain'],
            destination_as_marked=cr['destination_as_marked'],
            destination_inferred=cr['destination_inferred'],
            destination_uncertain=cr['destination_uncertain'],
            editors_notes=cr['editors_notes'],
            # NOTE(review): the 'excipit' CSV column is mapped onto the
            # 'explicit' parameter — presumably a spelling variant; confirm.
            explicit=cr['excipit'],
            incipit=cr['incipit'],
            keywords=cr['keywords'],
            language_of_work=languages,
            origin_as_marked=cr['origin_as_marked'],
            origin_inferred=cr['origin_inferred'],
            origin_uncertain=cr['origin_uncertain'],
            original_calendar=cr['original_calendar'],
            original_catalogue=original_catalogue
        )
        works.append( {
            "id": work_id,
            "csv_id": cr["iwork_id"],
        } )
        # author_ids
        # author_names
        if cr["author_names"]:
            person_id = get_person_id_from_primary_name( people, standardise_name( cr["author_names"]))
            tweaker.create_relationship_created( person_id, work_id )
        # addressee_ids
        # addressee_names
        if cr["addressee_names"] :
            person_id = get_person_id_from_primary_name( people, standardise_name( cr["addressee_names"]))
            tweaker.create_relationship_addressed_to( work_id, person_id, )
        # mention_id
        # emlo_mention_id
        if cr["mention_id"]:
            person_id = get_person_id_from_primary_name( people, standardise_name( cr["mention_id"] ))
            tweaker.create_relationship_mentions( work_id, person_id, )
        # origin_id
        # origin_name
        if cr["origin_name"] :
            location_id = get_location_id_from_location_name( places, standardise_name( cr["origin_name"] ))
            tweaker.create_relationship_was_sent_from( work_id, location_id )
        # destination_id
        # destination_name
        if cr["destination_name"] :
            location_id = get_location_id_from_location_name( places, standardise_name( cr["destination_name"] ))
            tweaker.create_relationship_was_sent_to( work_id, location_id )
        # resource_name
        # resource_url
        # resource_details
        if cr["resource_name"] and cr["resource_url"] :
            resource_id = tweaker.create_resource( cr['resource_name'], cr['resource_url'], cr['resource_details'] )
            tweaker.create_relationship_work_resource( work_id, resource_id )
        # answererby — reply links are not imported yet, only reported.
        if cr["answererby"] :
            # tweaker.create_relationship_work_reply_to
            print( "Error: not handled answererby yet, which is the reply!?")
        # notes_on_date_of_work
        if cr['notes_on_date_of_work'] :
            comment_id = tweaker.create_comment( cr['notes_on_date_of_work'] )
            tweaker.create_relationship_note_on_work_date( comment_id, work_id )
        # notes_on_addressees
        if cr['notes_on_addressees'] :
            comment_id = tweaker.create_comment( cr['notes_on_addressees'] )
            tweaker.create_relationship_note_on_work_addressee( comment_id, work_id )
        # notes_on_letter
        if cr['notes_on_letter'] :
            comment_id = tweaker.create_comment( cr['notes_on_letter'] )
            tweaker.create_relationship_note_on_work_generally( comment_id, work_id )
        # notes_on_people_mentioned
        if cr['notes_on_people_mentioned'] :
            comment_id = tweaker.create_comment( cr['notes_on_people_mentioned'] )
            tweaker.create_relationship_note_on_work_people_mentioned( comment_id, work_id )
        # notes_on_authors
        if cr['notes_on_authors'] :
            comment_id = tweaker.create_comment( cr['notes_on_authors'] )
            tweaker.create_relationship_note_on_work_author( comment_id, work_id )
        countdown -= 1
    csv_rows = tweaker.get_csv_data( "resources/royal_society/manifestations.csv" )
    #csv_rows = csv_rows[:5]
    count = countdown = len(csv_rows)
    # Create Manifestations
    #
    print( "Create Manifestations" )
    skip_first_row = True
    # Several manifestations may belong to one work; their ids are the work id
    # suffixed with '-a', '-b', ... in CSV order.
    man_id_end = 'abcdefghijklmnopqrstuvwxyz'
    work_id_manfestations = {}
    for cr in csv_rows:
        if skip_first_row:
            skip_first_row = False
            continue
        print( str(countdown) + " of " + str(count), ":", cr["manifestation_id"] )
        work_id = get_work_id_from_csv_id( works, cr['iwork_id'] )
        if work_id not in work_id_manfestations :
            work_id_manfestations[work_id] = 0
        else :
            work_id_manfestations[work_id] += 1
        if work_id_manfestations[work_id] >= len(man_id_end) :
            print( "Error: need more man id generation space")
            sys.exit()
        manifestation_id = work_id + "-" + man_id_end[work_id_manfestations[work_id]]
        tweaker.create_manifestation( manifestation_id,
                                      cr['manifestation_type'],
                                      id_number_or_shelfmark=cr['id_number_or_shelfmark']
        )
        # repository_id
        tweaker.create_relationship_manifestation_in_repository( manifestation_id, repository_id)
        # iwork_id
        tweaker.create_relationship_manifestation_of_work( manifestation_id, work_id )
        # manifestation_notes
        if cr['manifestation_notes'] :
            comment_id = tweaker.create_comment( cr['manifestation_notes'] )
            tweaker.create_relationship_note_manifestation( comment_id, manifestation_id )
        countdown -= 1
    print()
    tweaker.print_audit()
    tweaker.commit_changes(do_commit)
    print( "Fini" )
def standardise_name(name):
    """Normalise *name* to carry exactly one leading ``RSEL_`` prefix.

    Every existing ``RSEL_`` marker (wherever it occurs in the string) is
    removed, surrounding whitespace is stripped, and a single ``RSEL_`` is
    prepended to the result.
    """
    bare = name.replace("RSEL_", "").strip()
    return "RSEL_" + bare
def get_work_id_from_csv_id(works, csv_id):
    """Return the database id of the work whose ``csv_id`` equals *csv_id*.

    Prints an error and returns ``None`` when no work matches.
    """
    for record in works:
        if record['csv_id'] == csv_id:
            return record['id']
    print("Error csv_id " + csv_id + " not found!")
    return None
def get_person_id_from_primary_name(people, name):
    """Return the database id of the person whose stored (lower-case) primary
    name matches *name*, compared case-insensitively.

    Prints an error and returns ``None`` when no person matches.
    """
    wanted = name.lower()
    for person in people:
        if person['primary_name'] == wanted:
            return person['id']
    print("Error Name " + wanted + " not found!")
    return None
def get_location_id_from_location_name(places, name):
    """Return the database id of the place whose stored (lower-case) location
    name matches *name*, compared case-insensitively.

    Prints an error and returns ``None`` when no place matches.
    """
    wanted = name.lower()
    for place in places:
        if place['location_name'] == wanted:
            return place['id']
    print("Error Name " + wanted + " not found!")
    return None
# Script entry point: announce start/finish around the CSV import run.
if __name__ == '__main__':
    print( "Starting main()")
    main()
    print( "Finished main()")
| 27.926606
| 138
| 0.736531
|
acff1c6ec895a0177f628b6b3112b85cbc951139
| 3,878
|
py
|
Python
|
select_mask.py
|
stockmann-lab/ASL_coil
|
14260ca18b4a13fb9290b0507a4279d9e4023665
|
[
"MIT"
] | null | null | null |
select_mask.py
|
stockmann-lab/ASL_coil
|
14260ca18b4a13fb9290b0507a4279d9e4023665
|
[
"MIT"
] | 10
|
2021-01-04T01:45:52.000Z
|
2021-03-04T17:57:11.000Z
|
select_mask.py
|
stockmann-lab/ASL_coil
|
14260ca18b4a13fb9290b0507a4279d9e4023665
|
[
"MIT"
] | null | null | null |
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import scipy
import scipy.io as sio
from slice_plotter import Slice_Plotter
from slice_plotter import quick_slice_plot
from matplotlib.widgets import RectangleSelector
from matplotlib.patches import Rectangle
def select_callback_link(link_target, plotter):
    """Build a RectangleSelector callback bound to *link_target* and *plotter*.

    The returned callback records the dragged rectangle's corners and the
    plotter's current slice index into ``link_target[0:5]`` (as
    ``x1, x2, y1, y2, slice``) and moves the red preview rectangle onto the
    current slice.
    """
    def _on_select(eclick, erelease):
        start_x, start_y = eclick.xdata, eclick.ydata
        end_x, end_y = erelease.xdata, erelease.ydata
        # Remove the preview patch drawn on the slice of the previous drag.
        plotter.pop_patch(link_target[4])
        link_target[0:5] = (start_x, end_x, start_y, end_y, plotter.ind)
        outline = Rectangle((start_x, start_y), end_x - start_x, end_y - start_y, fill=False, color='r')
        plotter.add_patch(outline, plotter.ind)
    return _on_select
def close_on_enter(event):
    """Keyboard handler: 'enter' closes the current figure; 'q' prints a
    notice and aborts the whole program. Any other key is ignored."""
    key = event.key
    if key == 'enter':
        plt.close()
    if key == 'q':
        print('Quitting...')
        quit()
def select_mask(mag, mag_mask, z_range=3, title_id=None, int_output=False, confirm=True, mask_type='int32'):
    """Interactively select a rectangular target region on a 3-D magnitude
    volume and return the resulting mask.

    The user drags a rectangle on a slice viewer; the selection is extended
    over ``z_range`` slices around the chosen slice, previewed, and (unless
    ``confirm`` is falsy) confirmed via a Y/N/Q prompt.

    Parameters
    ----------
    mag : ndarray
        3-D magnitude volume (x, y, z) -- assumed; confirm against callers.
    mag_mask : ndarray
        Mask multiplied into the display and the returned mask.
    z_range : int
        Number of slices around the selected slice to include.
    title_id : str or None
        Optional prefix for the window title.
    int_output : bool
        If True, NaNs are zeroed and the mask is cast to ``mask_type``.
    confirm : bool
        If falsy, skip the interactive confirmation loop.
    mask_type : str
        dtype name used for the cast when ``int_output`` is True.

    Returns
    -------
    ndarray
        float32 mask with NaN outside the selection, or an integer mask
        when ``int_output`` is True.
    """
    cmap = plt.get_cmap('bone')
    # NOTE: mutates the shared registered colormap instance (deprecated in
    # newer matplotlib) -- kept as-is.
    cmap.set_bad('black')
    good = False
    artery_mask = np.zeros_like(mag, dtype='float32')
    title = f'Magnitude -- select target region'
    if title_id is not None:
        title = title_id + ' ' + title
    # NOTE(review): on a rejected selection the mask is not reset before the
    # next pass, so earlier selections accumulate -- confirm intended.
    while not good:
        rect_vars = np.zeros(5).astype('int32')
        while np.all(rect_vars == 0):
            mag_fig, mag_ax = plt.subplots(1, 1)
            mag_plotter = Slice_Plotter(mag_ax, np.transpose(mag * mag_mask, axes=(1, 0, 2)), title=title, cmap=cmap)
            mag_fig.canvas.mpl_connect('scroll_event', mag_plotter.onscroll)
            selector = RectangleSelector(mag_ax, select_callback_link(rect_vars, mag_plotter),
                                         drawtype='box', useblit=True,
                                         button=[1],  # left click only
                                         minspanx=5, minspany=5,
                                         spancoords='pixels',
                                         interactive=True)
            mag_fig.canvas.mpl_connect('scroll_event', mag_plotter.onscroll)
            mag_fig.canvas.mpl_connect('key_press_event', close_on_enter)
            print(f'Locate arteries on labeling plane')
            plt.show()
            plt.close()
        del selector
        # Expand the dragged rectangle to whole-pixel bounds.
        x1 = int(np.floor(np.min(rect_vars[0:2])))
        x2 = int(np.ceil(np.max(rect_vars[0:2])))
        y1 = int(np.floor(np.min(rect_vars[2:4])))
        y2 = int(np.ceil(np.max(rect_vars[2:4])))
        z = rect_vars[4]
        x_slice = slice(x1, x2 + 1)
        y_slice = slice(y1, y2 + 1)
        z_slice = slice(max(0, z - (z_range - 1) // 2), min(mag.shape[2] - 1, z + z_range // 2 + 1))
        artery_mask[x_slice, y_slice, z_slice] = 1
        # NaN outside the selection so the colormap's "bad" color shows it black.
        artery_mask[artery_mask == 0] = np.nan
        artery_mask = artery_mask * mag_mask
        mag_fig, mag_ax = plt.subplots(1, 1)
        mag_plotter = Slice_Plotter(mag_ax, np.transpose((mag * artery_mask)[..., z_slice], axes=(1, 0, 2)), f'Target mask', cmap=cmap)
        mag_fig.canvas.mpl_connect('scroll_event', mag_plotter.onscroll)
        mag_fig.canvas.mpl_connect('key_press_event', close_on_enter)
        print('Confirm mask -- close when done')
        plt.show(block=True)
        plt.close()
        valid = False
        if not confirm:
            valid = True
            good = True
        while not valid:
            confirm = input('Good? [Y/N/Q]: ').upper()
            valid = True
            if confirm == 'Y':
                good = True
            elif confirm == 'N':
                good = False
            elif confirm == 'Q':
                print('Quitting...')
                quit()
            else:
                valid = False
    if int_output:
        # BUGFIX: `artery_mask == np.nan` is always False (NaN never compares
        # equal), so NaNs were previously cast to garbage integers. Use isnan.
        artery_mask[np.isnan(artery_mask)] = 0
        artery_mask = artery_mask.astype(mask_type)
    return artery_mask
| 34.936937
| 135
| 0.570913
|
acff1d71ebaeb0ed2afef261be0b9042afaea5e8
| 29,232
|
py
|
Python
|
redmapper/background.py
|
jacobic/redmapper
|
bda5bd6f486fd5f18d35aa9ae4b875628e905604
|
[
"Apache-2.0"
] | null | null | null |
redmapper/background.py
|
jacobic/redmapper
|
bda5bd6f486fd5f18d35aa9ae4b875628e905604
|
[
"Apache-2.0"
] | null | null | null |
redmapper/background.py
|
jacobic/redmapper
|
bda5bd6f486fd5f18d35aa9ae4b875628e905604
|
[
"Apache-2.0"
] | null | null | null |
"""Galaxy background classes for redmapper.
This file contains classes to describe the b(x) background terms for computing
richness and other redmapper likelihoods.
"""
import fitsio
import numpy as np
import healpy as hp
import time
import copy
import os
import esutil
from tqdm import tqdm
import multiprocessing
import types
try:
import copy_reg as copyreg
except ImportError:
import copyreg
from .catalog import Entry
from .galaxy import GalaxyCatalog
from .redsequence import RedSequenceColorPar
from .depthmap import DepthMap
from .utilities import interpol, cic
from .utilities import _pickle_method
copyreg.pickle(types.MethodType, _pickle_method)
class Background(object):
    """
    Galaxy background class.
    This class describes the binned, interpolateable background term b(x), where
    x describes the redshift, chi-squared, and reference magnitude of the galaxy.
    This is used in regular richness calculations.
    """
    def __init__(self, filename):
        """
        Instantiate a Background.

        Reads the raw binned background from the CHISQBKG extension and
        re-grids it onto finer refmag / chisq / redshift bins by repeated
        1-d linear interpolation (negative interpolants are clamped to 0).

        Parameters
        ----------
        filename: `string`
            Background filename
        """
        obkg = Entry.from_fits_file(filename, ext='CHISQBKG')
        # Set the bin size in redshift, chisq and refmag spaces
        self.zbinsize = 0.001
        self.chisqbinsize = 0.5
        self.refmagbinsize = 0.01
        # Create the refmag bins
        refmagbins = np.arange(obkg.refmagrange[0], obkg.refmagrange[1], self.refmagbinsize)
        nrefmagbins = refmagbins.size
        # Create the chisq bins
        nchisqbins = obkg.chisqbins.size
        nlnchisqbins = obkg.lnchisqbins.size
        # Read out the number of redshift bins from the object background
        nzbins = obkg.zbins.size
        # Set up some arrays to populate
        sigma_g_new = np.zeros((nrefmagbins, nchisqbins, nzbins))
        sigma_lng_new = np.zeros((nrefmagbins, nchisqbins, nzbins))
        # Do linear interpolation to get the sigma_g value
        # between the raw background points.
        # If any values are less than 0 then turn them into 0.
        for i in range(nzbins):
            for j in range(nchisqbins):
                sigma_g_new[:,j,i] = np.interp(refmagbins, obkg.refmagbins, obkg.sigma_g[:,j,i])
                sigma_g_new[:,j,i] = np.where(sigma_g_new[:,j,i] < 0, 0, sigma_g_new[:,j,i])
                sigma_lng_new[:,j,i] = np.interp(refmagbins, obkg.refmagbins, obkg.sigma_lng[:,j,i])
                sigma_lng_new[:,j,i] = np.where(sigma_lng_new[:,j,i] < 0, 0, sigma_lng_new[:,j,i])
        sigma_g = sigma_g_new.copy()
        sigma_lng = sigma_lng_new.copy()
        # Refine the chisq axis next (sigma_lng keeps the original
        # log-chisq binning and is only refined in refmag and redshift).
        chisqbins = np.arange(obkg.chisqrange[0], obkg.chisqrange[1], self.chisqbinsize)
        nchisqbins = chisqbins.size
        sigma_g_new = np.zeros((nrefmagbins, nchisqbins, nzbins))
        # Now do the interpolation in chisq space
        for i in range(nzbins):
            for j in range(nrefmagbins):
                sigma_g_new[j,:,i] = np.interp(chisqbins, obkg.chisqbins, sigma_g[j,:,i])
                sigma_g_new[j,:,i] = np.where(sigma_g_new[j,:,i] < 0, 0, sigma_g_new[j,:,i])
        sigma_g = sigma_g_new.copy()
        zbins = np.arange(obkg.zrange[0], obkg.zrange[1], self.zbinsize)
        nzbins = zbins.size
        sigma_g_new = np.zeros((nrefmagbins, nchisqbins, nzbins))
        sigma_lng_new = np.zeros((nrefmagbins, nlnchisqbins, nzbins))
        # Now do the interpolation in redshift space
        for i in range(nchisqbins):
            for j in range(nrefmagbins):
                sigma_g_new[j,i,:] = np.interp(zbins, obkg.zbins, sigma_g[j,i,:])
                sigma_g_new[j,i,:] = np.where(sigma_g_new[j,i,:] < 0, 0, sigma_g_new[j,i,:])
        for i in range(nlnchisqbins):
            for j in range(nrefmagbins):
                sigma_lng_new[j,i,:] = np.interp(zbins, obkg.zbins, sigma_lng[j,i,:])
                sigma_lng_new[j,i,:] = np.where(sigma_lng_new[j,i,:] < 0, 0, sigma_lng_new[j,i,:])
        # n(refmag, z): sigma_g integrated over chisq (sum * bin width).
        n_new = np.zeros((nrefmagbins, nzbins))
        for i in range(nzbins):
            n_new[:,i] = np.sum(sigma_g_new[:,:,i], axis=1) * self.chisqbinsize
        # Save all meaningful fields
        # to be attributes of the background object.
        self.refmagbins = refmagbins
        self.chisqbins = chisqbins
        self.lnchisqbins = obkg.lnchisqbins
        self.zbins = zbins
        self.sigma_g = sigma_g_new
        self.sigma_lng = sigma_lng_new
        self.n = n_new
    def sigma_g_lookup(self, z, chisq, refmag, allow0=False):
        """
        Look up the Sigma_g(z, chisq, refmag) background quantity for matched filter

        Parameters
        ----------
        z: `np.array`
            redshifts of galaxies
            (NOTE(review): z appears to be used as a single scalar redshift
            broadcast over all galaxies via np.full_like below -- confirm)
        chisq: `np.array`
            chi-squared values of galaxies
        refmag: `np.array`
            reference magnitudes of galaxies
        allow0: `bool`, optional
            Flag to allow Sigma_g(x) to be zero. Otherwise will set to infinity
            where there is no data. Default is False.

        Returns
        -------
        sigma_g: `np.array`
            Sigma_g(x) for input values
        """
        zmin = self.zbins[0]
        chisqindex = np.searchsorted(self.chisqbins, chisq) - 1
        refmagindex = np.searchsorted(self.refmagbins, refmag) - 1
        # Look into changing to searchsorted
        # zbins[1] - zmin is the redshift bin width.
        ind = np.clip(np.round((z-zmin)/(self.zbins[1]-zmin)),0, self.zbins.size-1).astype(np.int32)
        # Out-of-range galaxies are temporarily pointed at bin 0 so the fancy
        # indexing below is safe; their values are overwritten with inf after.
        badchisq, = np.where((chisq < self.chisqbins[0]) |
                             (chisq > (self.chisqbins[-1] + self.chisqbinsize)))
        badrefmag, = np.where((refmag <= self.refmagbins[0]) |
                              (refmag > (self.refmagbins[-1] + self.refmagbinsize)))
        chisqindex[badchisq] = 0
        refmagindex[badrefmag] = 0
        zindex = np.full_like(chisqindex, ind)
        lookup_vals = self.sigma_g[refmagindex, chisqindex, zindex]
        lookup_vals[badchisq] = np.inf
        lookup_vals[badrefmag] = np.inf
        if not allow0:
            # Zero background means "no data" -> make the filter reject it.
            lookup_vals[lookup_vals == 0.0] = np.inf
        return lookup_vals
class ZredBackground(object):
    """
    Zred background class.
    This class describes the binned, interpolateable background term b(x), where
    x describes the zred and reference magnitude of the galaxy.
    This is used in centering calculations.
    """
    def __init__(self, filename):
        """
        Instantiate a Zred Background.

        Reads the raw binned background from the ZREDBKG extension and
        re-grids it onto finer refmag and zred bins with 1-d interpolation,
        clamping interpolants to the global minimum of the raw table.

        Parameters
        ----------
        filename: `string`
            Zred background filename
        """
        obkg = Entry.from_fits_file(filename, ext='ZREDBKG')
        # Will want to make configurable
        self.refmagbinsize = 0.01
        self.zredbinsize = 0.001
        # Create the refmag bins
        refmagbins = np.arange(obkg.refmagrange[0], obkg.refmagrange[1], self.refmagbinsize)
        nrefmagbins = refmagbins.size
        # Leave the zred bins the same
        nzredbins = obkg.zredbins.size
        # Set up arrays to populate
        sigma_g_new = np.zeros((nrefmagbins, nzredbins))
        # Clamp floor: interpolated values never go below the raw minimum.
        floor = np.min(obkg.sigma_g)
        for i in range(nzredbins):
            sigma_g_new[:, i] = np.clip(interpol(obkg.sigma_g[:, i], obkg.refmagbins, refmagbins), floor, None)
        sigma_g = sigma_g_new.copy()
        # And update zred
        zredbins = np.arange(obkg.zredrange[0], obkg.zredrange[1], self.zredbinsize)
        nzredbins = zredbins.size
        sigma_g_new = np.zeros((nrefmagbins, nzredbins))
        for i in range(nrefmagbins):
            sigma_g_new[i, :] = np.clip(interpol(sigma_g[i, :], obkg.zredbins, zredbins), floor, None)
        self.zredbins = zredbins
        self.zredrange = obkg.zredrange
        # Axis order bookkeeping for consumers: sigma_g is [refmag, zred].
        self.zred_index = 0
        self.refmag_index = 1
        self.refmagbins = refmagbins
        self.refmagrange = obkg.refmagrange
        self.sigma_g = sigma_g_new
    def sigma_g_lookup(self, zred, refmag):
        """
        Look up the Sigma_g(zred, refmag) background quantity for centering calculations

        Parameters
        ----------
        zred: `np.array`
            zred redshifts of galaxies
        refmag: `np.array`
            reference magnitudes of galaxies

        Returns
        -------
        sigma_g: `np.array`
            Sigma_g(x) for input values; infinity where out of range
        """
        zredindex = np.searchsorted(self.zredbins, zred) - 1
        refmagindex = np.searchsorted(self.refmagbins, refmag) - 1
        # Out-of-range galaxies are temporarily pointed at bin 0 so the fancy
        # indexing below is safe; their values are overwritten with inf after.
        badzred, = np.where((zredindex < 0) |
                            (zredindex >= self.zredbins.size))
        zredindex[badzred] = 0
        badrefmag, = np.where((refmagindex < 0) |
                              (refmagindex >= self.refmagbins.size))
        refmagindex[badrefmag] = 0
        lookup_vals = self.sigma_g[refmagindex, zredindex]
        lookup_vals[badzred] = np.inf
        lookup_vals[badrefmag] = np.inf
        return lookup_vals
class BackgroundGenerator(object):
    """
    Class to generate the galaxy background.
    This class will use multiprocessing to generate the galaxy background table
    to look up Sigma_g(z, chi-squared, refmag).
    """
    def __init__(self, config):
        """
        Instantiate a BackgroundGenerator
        Parameters
        ----------
        config: `redmapper.Configuration`
           Redmapper configuration object
        """
        # We need to delete "cosmo" from the config for pickling/multiprocessing
        self.config = config.copy()
        self.config.cosmo = None
    def run(self, clobber=False, natatime=100000, deepmode=False):
        """
        Generate the galaxy background using multiprocessing. The number of
        cores used is specified in self.config.calib_nproc, and the output
        filename is specified in self.config.bkgfile.
        Parameters
        ----------
        clobber: `bool`, optional
           Overwrite any existing self.config.bkgfile file. Default is False.
        natatime: `int`, optional
           Number of galaxies to read at a time. Default is 100000.
        deepmode: `bool`, optional
           Run background to full depth of survey (rather than Lstar richness limit).
           Default is False.
        """
        self.natatime = natatime
        self.deepmode = deepmode
        if not clobber:
            # If the CHISQBKG extension was already generated, there is nothing to do.
            if os.path.isfile(self.config.bkgfile):
                with fitsio.FITS(self.config.bkgfile) as fits:
                    if 'CHISQBKG' in [ext.get_extname() for ext in fits[1: ]]:
                        self.config.logger.info("CHISQBKG already in %s and clobber is False" % (self.config.bkgfile))
                        return
        # get the ranges
        self.refmagrange = np.array([12.0, self.config.limmag_catalog])
        self.nrefmagbins = np.ceil((self.refmagrange[1] - self.refmagrange[0]) / self.config.bkg_refmagbinsize).astype(np.int32)
        self.refmagbins = np.arange(self.nrefmagbins) * self.config.bkg_refmagbinsize + self.refmagrange[0]
        self.chisqrange = np.array([0.0, self.config.chisq_max])
        self.nchisqbins = np.ceil((self.chisqrange[1] - self.chisqrange[0]) / self.config.bkg_chisqbinsize).astype(np.int32)
        self.chisqbins = np.arange(self.nchisqbins) * self.config.bkg_chisqbinsize + self.chisqrange[0]
        # Log chi-squared binning parameters are hard-coded (not configurable).
        self.lnchisqbinsize = 0.2
        self.lnchisqrange = np.array([-2.0, 6.0])
        self.nlnchisqbins = np.ceil((self.lnchisqrange[1] - self.lnchisqrange[0]) / self.lnchisqbinsize).astype(np.int32)
        self.lnchisqbins = np.arange(self.nlnchisqbins) * self.lnchisqbinsize + self.lnchisqrange[0]
        self.nzbins = np.ceil((self.config.zrange[1] - self.config.zrange[0]) / self.config.bkg_zbinsize).astype(np.int32)
        self.zbins = np.arange(self.nzbins) * self.config.bkg_zbinsize + self.config.zrange[0]
        # this is the background hist
        sigma_g = np.zeros((self.nrefmagbins, self.nchisqbins, self.nzbins))
        sigma_lng = np.zeros((self.nrefmagbins, self.nlnchisqbins, self.nzbins))
        # We need the areas from the depth map
        if self.config.depthfile is not None:
            depthstr = DepthMap(self.config)
            self.areas = depthstr.calc_areas(self.refmagbins)
        else:
            # No depth map: assume a constant survey area at all magnitudes.
            self.areas = np.zeros(self.refmagbins.size) + self.config.area
        # Split into bins for parallel running
        logrange = np.log(np.array([self.config.zrange[0] - 0.001,
                                    self.config.zrange[1] + 0.001]))
        logbinsize = (logrange[1] - logrange[0]) / self.config.calib_nproc
        # Log-spaced redshift edges in *decreasing* order: zedges[0] is the
        # upper limit, zedges[-1] the lower (matching the selection below).
        zedges = (np.exp(logrange[0]) + np.exp(logrange[1])) - np.exp(logrange[0] + np.arange(self.config.calib_nproc + 1) * logbinsize)
        worker_list = []
        for i in range(self.config.calib_nproc):
            ubins, = np.where((self.zbins < zedges[i]) & (self.zbins > zedges[i + 1]))
            # NOTE(review): np.where output is always < zbins.size, so this
            # condition only serves to detect an empty selection via gd.size.
            gd, = np.where(ubins < self.zbins.size)
            # If we have more processes than bins, some of these will be empty
            # and this prevents us from adding them to the list
            if gd.size == 0:
                continue
            ubins = ubins[gd]
            zbinmark = np.zeros(self.zbins.size, dtype=bool)
            zbinmark[ubins] = True
            worker_list.append(zbinmark)
        # Use "fork" so workers inherit state; config was made picklable in __init__.
        mp_ctx = multiprocessing.get_context("fork")
        pool = mp_ctx.Pool(processes=self.config.calib_nproc)
        retvals = pool.map(self._worker, worker_list, chunksize=1)
        pool.close()
        pool.join()
        # And store the results
        for zbinmark, sigma_g_sub, sigma_lng_sub in retvals:
            sigma_g[:, :, zbinmark] = sigma_g_sub
            sigma_lng[:, :, zbinmark] = sigma_lng_sub
        # And save them
        dtype = [('zbins', 'f4', self.zbins.size),
                 ('zrange', 'f4', 2),
                 ('zbinsize', 'f4'),
                 ('chisq_index', 'i4'),
                 ('refmag_index', 'i4'),
                 ('chisqbins', 'f4', self.chisqbins.size),
                 ('chisqrange', 'f4', 2),
                 ('chisqbinsize', 'f4'),
                 ('lnchisqbins', 'f4', self.lnchisqbins.size),
                 ('lnchisqrange', 'f4', 2),
                 ('lnchisqbinsize', 'f4'),
                 ('areas', 'f4', self.areas.size),
                 ('refmagbins', 'f4', self.refmagbins.size),
                 ('refmagrange', 'f4', 2),
                 ('refmagbinsize', 'f4'),
                 ('sigma_g', 'f4', sigma_g.shape),
                 ('sigma_lng', 'f4', sigma_lng.shape)]
        chisq_bkg = Entry(np.zeros(1, dtype=dtype))
        chisq_bkg.zbins[:] = self.zbins
        chisq_bkg.zrange[:] = self.config.zrange
        chisq_bkg.zbinsize = self.config.bkg_zbinsize
        # Axis convention for the lookup table: index 0 is chisq, index 1 is refmag.
        chisq_bkg.chisq_index = 0
        chisq_bkg.refmag_index = 1
        chisq_bkg.chisqbins[:] = self.chisqbins
        chisq_bkg.chisqrange[:] = self.chisqrange
        chisq_bkg.chisqbinsize = self.config.bkg_chisqbinsize
        chisq_bkg.lnchisqbins[:] = self.lnchisqbins
        chisq_bkg.lnchisqrange[:] = self.lnchisqrange
        chisq_bkg.lnchisqbinsize = self.lnchisqbinsize
        chisq_bkg.areas[:] = self.areas
        chisq_bkg.refmagbins[:] = self.refmagbins
        chisq_bkg.refmagrange[:] = self.refmagrange
        chisq_bkg.refmagbinsize = self.config.bkg_refmagbinsize
        chisq_bkg.sigma_g[:, :] = sigma_g
        chisq_bkg.sigma_lng[:, :] = sigma_lng
        chisq_bkg.to_fits_file(self.config.bkgfile, extname='CHISQBKG', clobber=clobber)
    def _worker(self, zbinmark):
        """
        Internal worker method for multiprocessing.
        Parameters
        ----------
        zbinmark: `np.array`
           Indices for the redshift bins to run in this job
        Returns
        -------
        retvals: `tuple`
           zbinmark: `np.array`
              Indices for redshift bins run in this job
           sigma_g_sub: `np.array`
              Sigma_g(x) for the redshift bins in zbinmark
           sigma_lng_sub: `np.array`
              Sigma_lng(x) (log binning) for the redshift bins in zbinmark
        """
        starttime = time.time()
        zbins_use = self.zbins[zbinmark]
        zrange_use = np.array([zbins_use[0], zbins_use[-1] + self.config.bkg_zbinsize])
        # We need to load in the red sequence structure -- just in the specific redshift range
        self.config.logger.info('Loading red sequence color pars...')
        zredstr = RedSequenceColorPar(self.config.parfile, zrange=zrange_use)
        zredstrbinsize = zredstr.z[1] - zredstr.z[0]
        zpos = np.searchsorted(zredstr.z, zbins_use)
        # How many galaxies total?
        self.config.logger.info('Counting galaxies...')
        if self.config.galfile_pixelized and not self.config.load_merged:
            self.config.logger.info('Counting galaxies from pixelized galfile...')
            master = Entry.from_fits_file(self.config.galfile)
            if len(self.config.d.hpix) > 0:
                # We need to take a sub-region
                theta, phi = hp.pix2ang(master.nside, master.hpix)
                ipring_big = hp.ang2pix(self.config.d.nside, theta, phi)
                _, subreg_indices = esutil.numpy_util.match(self.config.d.hpix, ipring_big)
                subreg_indices = np.unique(subreg_indices)
            else:
                subreg_indices = np.arange(master.hpix.size)
            ngal = np.sum(master.ngals[subreg_indices])
            npix = subreg_indices.size
        else:
            self.config.logger.info('Counting galaxies from merged galfile header...')
            hdr = fitsio.read_header(self.config.galfile, ext=1)
            ngal = hdr['NAXIS2']
            # NOTE(review): npix = 0 means the main loop below (which requires
            # p < npix) never executes for merged galfiles -- confirm intended.
            npix = 0
        nmag = self.config.nmag
        ncol = nmag - 1
        # default values are all guaranteed to be out of range
        chisqs = np.zeros((ngal, zbins_use.size), dtype=np.float32) + np.exp(np.max(self.lnchisqbins)) + 100.0
        refmags = np.zeros(ngal, dtype=np.float32)
        self.config.logger.info('Getting zlimmag...')
        # Per-bin magnitude limit: 0.01 L* in deep mode, 0.1 L* otherwise.
        if (self.deepmode):
            zlimmag = np.atleast_1d(zredstr.mstar(zbins_use + self.config.bkg_zbinsize) - 2.5 * np.log10(0.01))
        else:
            zlimmag = np.atleast_1d(zredstr.mstar(zbins_use + self.config.bkg_zbinsize) - 2.5 * np.log10(0.1))
        self.config.logger.info('Filtering for bad zlimmag...')
        bad, = np.where(zlimmag >= self.config.limmag_catalog)
        zlimmag[bad] = self.config.limmag_catalog - 0.01
        # Snap the magnitude limits onto the refmag bin grid.
        zlimmagpos = np.clip(((zlimmag - self.refmagrange[0]) * self.nrefmagbins / (self.refmagrange[1] - self.refmagrange[0])).astype(np.int32), 0, self.nrefmagbins - 1)
        zlimmag = self.refmagbins[zlimmagpos] + self.config.bkg_refmagbinsize
        zbinmid = np.median(np.arange(zredstr.z.size - 1))
        # And the main loop
        ctr = 0
        p = 0
        # This covers both loops
        while ((ctr < ngal) and (p < npix)):
            # Read in a section of the galaxies, or the pixel
            if not self.config.galfile_pixelized:
                lo = ctr
                hi = np.clip(ctr + self.natatime, None, ngal)
                gals = GalaxyCatalog.from_fits_file(self.config.galfile, rows=np.arange(lo, hi))
                # NOTE(review): rows [lo, hi) are read but ctr advances to
                # hi + 1, which would skip row hi -- confirm (currently
                # unreachable: npix == 0 for non-pixelized galfiles above).
                ctr = hi + 1
            elif self.config.load_merged:
                lo = ctr
                hi = np.clip(ctr + self.natatime, None, ngal)
                # NOTE(review): nside=0/hpix=[] appears to read the full
                # catalog each iteration regardless of lo/hi -- confirm.
                gals = GalaxyCatalog.from_galfile(self.config.galfile, nside=0, hpix=[])
                ctr = hi + 1
            else:
                if master.ngals[subreg_indices[p]] == 0:
                    p += 1
                    continue
                gals = GalaxyCatalog.from_galfile(self.config.galfile, nside=master.nside,
                                                  hpix=master.hpix[subreg_indices[p]], border=0.0)
                lo = ctr
                hi = ctr + gals.size
                ctr += master.ngals[subreg_indices[p]]
                p += 1
            inds = np.arange(lo, hi)
            refmags[inds] = gals.refmag
            for i, zbin in enumerate(zbins_use):
                use, = np.where((gals.refmag > self.refmagrange[0]) &
                                (gals.refmag < zlimmag[i]))
                if (use.size > 0):
                    # Compute chisq at the redshift zbin
                    chisqs[inds[use], i] = zredstr.calculate_chisq(gals[use], zbin)
        binsizes = self.config.bkg_refmagbinsize * self.config.bkg_chisqbinsize
        lnbinsizes = self.config.bkg_refmagbinsize * self.lnchisqbinsize
        sigma_g_sub = np.zeros((self.nrefmagbins, self.nchisqbins, zbins_use.size))
        sigma_lng_sub = np.zeros((self.nrefmagbins, self.nlnchisqbins, zbins_use.size))
        self.config.logger.info('Calculating bkg for each zbin...')
        for i, zbin in enumerate(tqdm(zbins_use)):
            # Select galaxies inside the (chisq, refmag) histogram ranges.
            use, = np.where((chisqs[:, i] >= self.chisqrange[0]) &
                            (chisqs[:, i] < self.chisqrange[1]) &
                            (refmags >= self.refmagrange[0]) &
                            (refmags < self.refmagrange[1]))
            # Fractional bin coordinates for the cic() assignment below.
            chisqpos = (chisqs[use, i] - self.chisqrange[0]) * self.nchisqbins / (self.chisqrange[1] - self.chisqrange[0])
            refmagpos = (refmags[use] - self.refmagrange[0]) * self.nrefmagbins / (self.refmagrange[1] - self.refmagrange[0])
            value = np.ones(use.size)
            field = cic(value, chisqpos, self.nchisqbins, refmagpos, self.nrefmagbins, isolated=True)
            # Normalize raw counts to a density per unit area and bin size.
            for j in range(self.nchisqbins):
                sigma_g_sub[:, j, i] = field[:, j] / (self.areas * binsizes)
            # Repeat the histogram in log(chisq) space.
            lnchisqs = np.log(chisqs[:, i])
            use, = np.where((lnchisqs >= self.lnchisqrange[0]) &
                            (lnchisqs < self.lnchisqrange[1]) &
                            (refmags >= self.refmagrange[0]) &
                            (refmags < self.refmagrange[1]))
            lnchisqpos = (lnchisqs[use] - self.lnchisqrange[0]) * self.nlnchisqbins / (self.lnchisqrange[1] - self.lnchisqrange[0])
            refmagpos = (refmags[use] - self.refmagrange[0]) * self.nrefmagbins / (self.refmagrange[1] - self.refmagrange[0])
            value = np.ones(use.size)
            field2 = cic(value, lnchisqpos, self.nlnchisqbins, refmagpos, self.nrefmagbins, isolated=True)
            for j in range(self.nlnchisqbins):
                sigma_lng_sub[:, j, i] = field2[:, j] / (self.areas * lnbinsizes)
        self.config.logger.info("Finished %.4f < z < %.4f in %.1f seconds" % (zbins_use[0], zbins_use[-1],
                                                                              time.time() - starttime))
        return (zbinmark, sigma_g_sub, sigma_lng_sub)
class ZredBackgroundGenerator(object):
    """
    Class to generate the zred galaxy background.
    This class will generate the zred galaxy background
    table to look up Sigma_g(zred, refmag).
    """
    def __init__(self, config):
        """
        Instantiate a ZredBackgroundGenerator
        Parameters
        ----------
        config: `redmapper.Configuration`
           Redmapper configuration object
        """
        self.config = config
    def run(self, clobber=False, natatime=100000):
        """
        Generate the zred galaxy background. The output filename is specified
        in self.config.bkgfile.
        Parameters
        ----------
        clobber: `bool`, optional
           Overwrite any existing self.config.bkgfile file. Default is False.
        natatime: `int`, optional
           Number of galaxies to read at a time. Default is 100000
        """
        if not os.path.isfile(self.config.zredfile):
            raise RuntimeError("Must run ZredBackgroundGenerator with a zred file")
        if not clobber:
            # If the ZREDBKG extension was already generated, there is nothing to do.
            if os.path.isfile(self.config.bkgfile):
                with fitsio.FITS(self.config.bkgfile) as fits:
                    if 'ZREDBKG' in [ext.get_extname() for ext in fits[1: ]]:
                        self.config.logger.info("ZREDBKG already in %s and clobber is False" % (self.config.bkgfile))
                        return
        # Read in zred parameters
        zredstr = RedSequenceColorPar(self.config.parfile, fine=True, zrange=self.config.zrange)
        # Set ranges
        refmagrange = np.array([12.0, self.config.limmag_catalog])
        nrefmagbins = np.ceil((refmagrange[1] - refmagrange[0]) / self.config.bkg_refmagbinsize).astype(np.int32)
        refmagbins = np.arange(nrefmagbins) * self.config.bkg_refmagbinsize + refmagrange[0]
        zredrange = np.array([zredstr.z[0], zredstr.z[-2] + (zredstr.z[1] - zredstr.z[0])])
        nzredbins = np.ceil((zredrange[1] - zredrange[0]) / self.config.bkg_zredbinsize).astype(np.int32)
        zredbins = np.arange(nzredbins) * self.config.bkg_zredbinsize + zredrange[0]
        # Compute the areas...
        # This takes into account the configured sub-region
        if self.config.depthfile is not None:
            depthstr = DepthMap(self.config)
            areas = depthstr.calc_areas(refmagbins)
        else:
            # No depth map: assume a constant survey area at all magnitudes.
            areas = np.zeros(refmagbins.size) + self.config.area
        maxchisq = self.config.wcen_zred_chisq_max
        # Prepare pixels (if necessary) and count galaxies
        if not self.config.galfile_pixelized:
            raise ValueError("Only pixelized galfiles are supported at this moment.")
        master = Entry.from_fits_file(self.config.galfile)
        if len(self.config.d.hpix) > 0:
            # We need to take a sub-region
            theta, phi = hp.pix2ang(master.nside, master.hpix)
            ipring_big = hp.ang2pix(self.config.d.nside, theta, phi)
            _, subreg_indices = esutil.numpy_util.match(self.config.d.hpix, ipring_big)
            subreg_indices = np.unique(subreg_indices)
            #ipring_bin = hp.ang2pix(self.config.d.nside, theta, phi)
            #subreg_indices, = np.where(ipring_bin == self.config.d.hpix[0])
        else:
            subreg_indices = np.arange(master.hpix.size)
        ngal = np.sum(master.ngals[subreg_indices])
        npix = subreg_indices.size
        starttime = time.time()
        nmag = self.config.nmag
        ncol = nmag - 1
        # Default zred = -1 marks galaxies failing the chisq cut; they are
        # excluded by the range selection before the histogram below.
        zreds = np.zeros(ngal, dtype=np.float32) - 1.0
        refmags = np.zeros(ngal, dtype=np.float32)
        # NOTE(review): zbinmid (and nmag/ncol above) appear unused here.
        zbinmid = np.median(np.arange(zredstr.z.size, dtype=np.int32))
        # Loop
        ctr = 0
        p = 0
        while ((ctr < ngal) and (p < npix)):
            if master.ngals[subreg_indices[p]] == 0:
                p += 1
                continue
            gals = GalaxyCatalog.from_galfile(self.config.galfile, nside=master.nside,
                                              hpix=master.hpix[subreg_indices[p]],
                                              border=0.0,
                                              zredfile=self.config.zredfile)
            # Keep only galaxies with an acceptable zred fit quality.
            use, = np.where(gals.chisq < maxchisq)
            if use.size > 0:
                lo = ctr
                hi = ctr + use.size
                inds = np.arange(lo, hi, dtype=np.int64)
                refmags[inds] = gals.refmag[use]
                zreds[inds] = gals.zred[use]
            # ctr advances by the full pixel count, so slots for galaxies that
            # failed the cut keep their default values and drop out later.
            ctr += master.ngals[subreg_indices[p]]
            p += 1
        # Compute cic
        sigma_g = np.zeros((nrefmagbins, nzredbins))
        binsizes = self.config.bkg_refmagbinsize * self.config.bkg_zredbinsize
        use, = np.where((zreds >= zredrange[0]) & (zreds < zredrange[1]) &
                        (refmags > refmagrange[0]) & (refmags < refmagrange[1]))
        # Fractional bin coordinates for the cic() assignment below.
        zredpos = (zreds[use] - zredrange[0]) * nzredbins / (zredrange[1] - zredrange[0])
        refmagpos = (refmags[use] - refmagrange[0]) * nrefmagbins / (refmagrange[1] - refmagrange[0])
        value = np.ones(use.size)
        field = cic(value, zredpos, nzredbins, refmagpos, nrefmagbins, isolated=True)
        # NOTE(review): this full assignment is immediately overwritten
        # column-by-column in the loop below.
        sigma_g[:, :] = field
        for j in range(nzredbins):
            # Floor counts at 0.1 to avoid empty bins, then normalize to a density.
            sigma_g[:, j] = np.clip(field[:, j], 0.1, None) / (areas * binsizes)
        self.config.logger.info("Finished zred background in %.2f seconds" % (time.time() - starttime))
        # save it
        dtype = [('zredbins', 'f4', zredbins.size),
                 ('zredrange', 'f4', zredrange.size),
                 ('zredbinsize', 'f4'),
                 ('zred_index', 'i2'),
                 ('refmag_index', 'i2'),
                 ('refmagbins', 'f4', refmagbins.size),
                 ('refmagrange', 'f4', refmagrange.size),
                 ('refmagbinsize', 'f4'),
                 ('areas', 'f4', areas.size),
                 ('sigma_g', 'f4', sigma_g.shape)]
        zred_bkg = Entry(np.zeros(1, dtype=dtype))
        zred_bkg.zredbins[:] = zredbins
        zred_bkg.zredrange[:] = zredrange
        zred_bkg.zredbinsize = self.config.bkg_zredbinsize
        # Axis convention for the lookup table: index 0 is zred, index 1 is refmag.
        zred_bkg.zred_index = 0
        zred_bkg.refmag_index = 1
        zred_bkg.refmagbins[:] = refmagbins
        zred_bkg.refmagrange[:] = refmagrange
        zred_bkg.refmagbinsize = self.config.bkg_refmagbinsize
        zred_bkg.areas[:] = areas
        zred_bkg.sigma_g[:, :] = sigma_g
        zred_bkg.to_fits_file(self.config.bkgfile, extname='ZREDBKG', clobber=clobber)
| 38.564644
| 170
| 0.589046
|
acff1e1f055b5e04cc541be7dcdc21c692f5eaf1
| 8,210
|
py
|
Python
|
luna/gateware/usb/request/standard.py
|
pimdegroot/luna
|
16110a59c72279e7272310e81ca4656da11fb1da
|
[
"BSD-3-Clause"
] | null | null | null |
luna/gateware/usb/request/standard.py
|
pimdegroot/luna
|
16110a59c72279e7272310e81ca4656da11fb1da
|
[
"BSD-3-Clause"
] | null | null | null |
luna/gateware/usb/request/standard.py
|
pimdegroot/luna
|
16110a59c72279e7272310e81ca4656da11fb1da
|
[
"BSD-3-Clause"
] | null | null | null |
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Standard, full-gateware control request handlers. """
import unittest
from nmigen import Module, Elaboratable, Cat
from usb_protocol.types import USBStandardRequests, USBRequestType
from usb_protocol.emitters import DeviceDescriptorCollection
from ..usb2.request import RequestHandlerInterface, USBRequestHandler
from ..usb2.descriptor import GetDescriptorHandler
from ..stream import USBInStreamInterface
from ...stream.generator import StreamSerializer
class StandardRequestHandler(USBRequestHandler):
    """ Pure-gateware USB setup request handler. Implements the standard requests required for enumeration. """
    def __init__(self, descriptors: DeviceDescriptorCollection):
        """
        Parameters:
            descriptors -- The DeviceDescriptorCollection that contains our descriptors.
        """
        self.descriptors = descriptors
        super().__init__()
    def handle_register_write_request(self, m, new_value_signal, write_strobe, stall_condition=0):
        """ Fills in the current state with a request handler meant to set a register.
        Parameters:
            new_value_signal -- The signal to receive the new value to be applied to the relevant register.
            write_strobe     -- The signal which will be pulsed when new_value_signal contains a update.
            stall_condition  -- If provided, if this condition is true, the request will be STALL'd instead
                                of acknowledged.
        """
        # Provide an response to the STATUS stage.
        with m.If(self.interface.status_requested):
            # If our stall condition is met, stall; otherwise, send a ZLP [USB 8.5.3].
            with m.If(stall_condition):
                m.d.comb += self.interface.handshakes_out.stall.eq(1)
            with m.Else():
                m.d.comb += self.send_zlp()
        # Accept the relevant value after the packet is ACK'd...
        with m.If(self.interface.handshakes_in.ack):
            # NOTE(review): only the low 7 bits of wValue are captured here --
            # enough for SET_ADDRESS (7-bit addresses); confirm this also
            # suffices for SET_CONFIGURATION, which shares this path.
            m.d.comb += [
                write_strobe      .eq(1),
                new_value_signal  .eq(self.interface.setup.value[0:7])
            ]
            # ... and then return to idle.
            m.next = 'IDLE'
    def handle_simple_data_request(self, m, transmitter, data, length=1):
        """ Fills in a given current state with a request that returns a given piece of data.
        For e.g. GET_CONFIGURATION and GET_STATUS requests.
        Parameters:
            transmitter -- The transmitter module we're working with.
            data        -- The data to be returned.
            length      -- The total transfer length in bytes (default 1).
        """
        # Connect our transmitter up to the output stream...
        m.d.comb += [
            transmitter.stream          .attach(self.interface.tx),
            Cat(transmitter.data[0:1])  .eq(data),
            transmitter.max_length      .eq(length)
        ]
        # ... trigger it to respond when data's requested...
        with m.If(self.interface.data_requested):
            m.d.comb += transmitter.start.eq(1)
        # ... and ACK our status stage.
        with m.If(self.interface.status_requested):
            m.d.comb += self.interface.handshakes_out.ack.eq(1)
            m.next = 'IDLE'
    def elaborate(self, platform):
        # Build and return the request-handling FSM and its submodules.
        m = Module()
        interface = self.interface
        # Create convenience aliases for our interface components.
        setup = interface.setup
        handshake_generator = interface.handshakes_out
        tx = interface.tx
        #
        # Submodules
        #
        # Handler for Get Descriptor requests; responds with our various fixed descriptors.
        m.submodules.get_descriptor = get_descriptor_handler = GetDescriptorHandler(self.descriptors)
        m.d.comb += [
            get_descriptor_handler.value  .eq(setup.value),
            get_descriptor_handler.length .eq(setup.length),
        ]
        # Handler for various small-constant-response requests (GET_CONFIGURATION, GET_STATUS).
        m.submodules.transmitter = transmitter = \
            StreamSerializer(data_length=2, domain="usb", stream_type=USBInStreamInterface, max_length_width=2)
        #
        # Handlers.
        #
        with m.If(setup.type == USBRequestType.STANDARD):
            with m.FSM(domain="usb"):
                # IDLE -- not handling any active request
                with m.State('IDLE'):
                    # If we've received a new setup packet, handle it.
                    # TODO: limit this to standard requests
                    with m.If(setup.received):
                        # Select which standard packet we're going to handler.
                        with m.Switch(setup.request):
                            with m.Case(USBStandardRequests.GET_STATUS):
                                m.next = 'GET_STATUS'
                            with m.Case(USBStandardRequests.SET_ADDRESS):
                                m.next = 'SET_ADDRESS'
                            with m.Case(USBStandardRequests.SET_CONFIGURATION):
                                m.next = 'SET_CONFIGURATION'
                            with m.Case(USBStandardRequests.GET_DESCRIPTOR):
                                m.next = 'GET_DESCRIPTOR'
                            with m.Case(USBStandardRequests.GET_CONFIGURATION):
                                m.next = 'GET_CONFIGURATION'
                            # Default case: any other standard request is unsupported.
                            with m.Case():
                                m.next = 'UNHANDLED'
                # GET_STATUS -- Fetch the device's status.
                # For now, we'll always return '0'.
                with m.State('GET_STATUS'):
                    # TODO: handle reporting endpoint stall status
                    # TODO: copy the remote wakeup and bus-powered attributes from bmAttributes of the relevant descriptor?
                    self.handle_simple_data_request(m, transmitter, 0, length=2)
                # SET_ADDRESS -- The host is trying to assign us an address.
                with m.State('SET_ADDRESS'):
                    self.handle_register_write_request(m, interface.new_address, interface.address_changed)
                # SET_CONFIGURATION -- The host is trying to select an active configuration.
                with m.State('SET_CONFIGURATION'):
                    # TODO: stall if we don't have a relevant configuration
                    self.handle_register_write_request(m, interface.new_config, interface.config_changed)
                # GET_DESCRIPTOR -- The host is asking for a USB descriptor -- for us to "self describe".
                with m.State('GET_DESCRIPTOR'):
                    m.d.comb += [
                        get_descriptor_handler.tx  .attach(tx),
                        handshake_generator.stall  .eq(get_descriptor_handler.stall)
                    ]
                    # Respond to our data stage with a descriptor...
                    with m.If(interface.data_requested):
                        # NOTE(review): the trailing comma makes this a 1-tuple;
                        # nMigen accepts iterables, so behavior is unchanged.
                        m.d.comb += get_descriptor_handler.start .eq(1),
                    # ... and ACK our status stage.
                    with m.If(interface.status_requested):
                        m.d.comb += handshake_generator.ack.eq(1)
                        m.next = 'IDLE'
                # GET_CONFIGURATION -- The host is asking for the active configuration number.
                with m.State('GET_CONFIGURATION'):
                    self.handle_simple_data_request(m, transmitter, interface.active_config)
                # UNHANDLED -- we've received a request we're not prepared to handle
                with m.State('UNHANDLED'):
                    # When we next have an opportunity to stall, do so,
                    # and then return to idle.
                    with m.If(interface.data_requested | interface.status_requested):
                        m.d.comb += handshake_generator.stall.eq(1)
                        m.next = 'IDLE'
        return m
if __name__ == "__main__":
    # Run unit tests when executed directly; deprecation warnings are
    # suppressed to keep the output clean.
    unittest.main(warnings="ignore")
| 40.44335
| 123
| 0.578441
|
acff1e28224da0658c8a5600ee3bd42ece7758a0
| 767
|
py
|
Python
|
boards/milador-nrf52-0.1.0/libraries/Adafruit_TinyUSB_Arduino/examples/HID/hid_generic_inout/hid_test.py
|
milador/milador-arduino
|
d0f9ebe33e0e67a921f96d0e278d9d646b8bad91
|
[
"MIT"
] | null | null | null |
boards/milador-nrf52-0.1.0/libraries/Adafruit_TinyUSB_Arduino/examples/HID/hid_generic_inout/hid_test.py
|
milador/milador-arduino
|
d0f9ebe33e0e67a921f96d0e278d9d646b8bad91
|
[
"MIT"
] | null | null | null |
boards/milador-nrf52-0.1.0/libraries/Adafruit_TinyUSB_Arduino/examples/HID/hid_generic_inout/hid_test.py
|
milador/milador-arduino
|
d0f9ebe33e0e67a921f96d0e278d9d646b8bad91
|
[
"MIT"
] | null | null | null |
# Install python3 HID package https://pypi.org/project/hid/
import hid

# Default is the Adafruit vendor ID.
USB_VID = 0x239A

# Fixed typo in the user-facing message ("Openning" -> "Opening").
print("Opening HID device with VID = 0x%X" % USB_VID)

# 'dev_info' replaces the original name 'dict', which shadowed the builtin.
for dev_info in hid.enumerate(USB_VID):
    print(dev_info)
    dev = hid.Device(dev_info['vendor_id'], dev_info['product_id'])
    if dev:
        # Interactive echo loop: send typed text to the device, print replies.
        while True:
            # Get input from console and encode to UTF8 for array of chars.
            # hid generic inout is a single report, therefore by HIDAPI
            # requirement it must be preceded with 0x00 as a dummy report ID.
            str_out = b'\x00'
            str_out += input("Send text to HID Device : ").encode('utf-8')
            dev.write(str_out)
            str_in = dev.read(64)
            print("Received from HID Device:", str_in, '\n')
| 34.863636
| 80
| 0.623207
|
acff1f27c6020b2856358cf84740606ea614f1eb
| 961
|
py
|
Python
|
#47. Permutations II.py
|
medisean/leetcode
|
f218fb738fb2b57f5eea3795a0a02cf495561465
|
[
"MIT"
] | null | null | null |
#47. Permutations II.py
|
medisean/leetcode
|
f218fb738fb2b57f5eea3795a0a02cf495561465
|
[
"MIT"
] | null | null | null |
#47. Permutations II.py
|
medisean/leetcode
|
f218fb738fb2b57f5eea3795a0a02cf495561465
|
[
"MIT"
] | null | null | null |
'''
47. Permutations II
Given a collection of numbers that might contain duplicates,
return all possible unique permutations.
Example:
Input: [1,1,2]
Output:
[
[1,1,2],
[1,2,1],
[2,1,1]
]
Permutation algorithm with de-duplication (e.g. 3 distinct values give 3! = 6 results)
'''
class Solution:
    def permute(self, nums: [int]) -> [[int]]:
        """Return all unique permutations of nums.

        Builds the result incrementally: each number is inserted into every
        slot of every partial permutation, skipping duplicates as they arise.
        """
        perms = []
        for index, value in enumerate(nums):
            if index == 0:
                # Seed with the single-element permutation.
                perms.append([value])
                continue
            extended = []
            for perm in perms:
                # Insert the new value at every interior position...
                for slot in range(len(perm)):
                    candidate = perm[:slot] + [value] + perm[slot:]
                    if candidate not in extended:
                        extended.append(candidate)
                # ... and at the end.
                candidate = perm + [value]
                if candidate not in extended:
                    extended.append(candidate)
            perms = extended
        return perms
if __name__ == "__main__":
    # Quick manual check: print the unique permutations of a list with duplicates.
    solution = Solution()
    print(solution.permute([1, 2, 1]))
| 22.348837
| 74
| 0.391259
|
acff214608f592e6039398081f65cd77afe83a74
| 810
|
py
|
Python
|
strstr/2.py
|
stonemary/lintcode_solutions
|
f41fd0e56fb88ab54d0ab624977bff1623a6d33a
|
[
"Apache-2.0"
] | null | null | null |
strstr/2.py
|
stonemary/lintcode_solutions
|
f41fd0e56fb88ab54d0ab624977bff1623a6d33a
|
[
"Apache-2.0"
] | null | null | null |
strstr/2.py
|
stonemary/lintcode_solutions
|
f41fd0e56fb88ab54d0ab624977bff1623a6d33a
|
[
"Apache-2.0"
] | null | null | null |
# 15 mins
class Solution:
    def strStr(self, source, target):
        """Return the index of the first occurrence of target in source.

        Returns -1 if either argument is None or if target does not occur.
        An empty target matches at index 0 (including when source is empty),
        matching the original implementation's semantics.
        """
        if source is None or target is None:
            return -1
        # str.find performs correct substring search. The previous manual
        # scan mishandled overlapping prefixes (it only reset the target
        # pointer to 0 or 1 and never rewound the source pointer), so e.g.
        # "aab" in "aaab" incorrectly returned -1 instead of 1.
        return source.find(target)
| 28.928571
| 64
| 0.485185
|
acff21d32ab75fe985dda114c20d947a85a42c6e
| 2,126
|
py
|
Python
|
tokio/cli/cache_slurm.py
|
NERSC/pytokio
|
22244718cf82567c50620cbe0e635dfc990de36b
|
[
"BSD-3-Clause-LBNL"
] | 22
|
2017-11-14T01:30:48.000Z
|
2022-01-01T21:51:00.000Z
|
tokio/cli/cache_slurm.py
|
glennklockwood/pytokio
|
22244718cf82567c50620cbe0e635dfc990de36b
|
[
"BSD-3-Clause-LBNL"
] | 39
|
2017-12-20T01:42:19.000Z
|
2020-05-28T21:17:26.000Z
|
tokio/cli/cache_slurm.py
|
glennklockwood/pytokio
|
22244718cf82567c50620cbe0e635dfc990de36b
|
[
"BSD-3-Clause-LBNL"
] | 5
|
2018-02-06T19:39:19.000Z
|
2019-07-10T01:20:26.000Z
|
"""
Provides CLI interfaces for
:meth:`tokio.connectors.slurm.Slurm.to_dataframe` and
:meth:`tokio.connectors.slurm.Slurm.to_json`.
"""
import os
import argparse
import tokio.connectors.slurm
def main(argv=None):
    """Entry point for the CLI interface.

    Parses arguments, loads the Slurm job data (from a cache file or via
    sacct), and serializes it to stdout or to the requested output file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("slurm_jobid", type=str, help="slurm jobid to process")
    parser.add_argument("-n", "--native", action="store_true", default=True,
                        help="return output in native format")
    parser.add_argument("-j", "--json", action="store_true", help="return output in JSON format")
    parser.add_argument("-c", "--csv", action="store_true", help="return output in CSV format")
    parser.add_argument("-o", "--output", type=str, default=None, help="output file")
    args = parser.parse_args(argv)
    jobid = args.slurm_jobid
    # A file with the same name as the jobid is treated as a cache file;
    # this only gets goofy if you have a file named the same as a jobid.
    if os.path.isfile(jobid):
        slurm_data = tokio.connectors.slurm.Slurm(cache_file=jobid)
    else:
        # Treat the argument as a Slurm jobid and call sacct.
        slurm_data = tokio.connectors.slurm.Slurm(jobid)
    # Serialize the object, either to a file or to stdout.
    cache_file = args.output
    if cache_file is not None:
        print("Caching to %s" % cache_file)
        if args.csv:
            (slurm_data.to_dataframe()).to_csv(cache_file)
        elif args.json:
            with open(cache_file, 'w') as cache_fd:
                cache_fd.write(slurm_data.to_json())
        elif args.native:
            with open(cache_file, 'w') as cache_fd:
                cache_fd.write(str(slurm_data))
        else:
            raise Exception("No output format specified")
    else:
        if args.csv:
            print((slurm_data.to_dataframe()).to_csv())
        elif args.json:
            print(slurm_data.to_json(indent=4, sort_keys=True))
        elif args.native:
            print(str(slurm_data))
        else:
            raise Exception("No output format specified")
| 36.033898
| 97
| 0.639699
|
acff234179eebfc040a19396abc376a7e8f1bb90
| 77
|
py
|
Python
|
hello_world.py
|
natewachter/ASTR-119
|
4494731fdf69397b9701d6594b60f67326656558
|
[
"MIT"
] | null | null | null |
hello_world.py
|
natewachter/ASTR-119
|
4494731fdf69397b9701d6594b60f67326656558
|
[
"MIT"
] | 3
|
2020-10-01T17:51:33.000Z
|
2020-11-29T23:51:04.000Z
|
hello_world.py
|
natewachter/ASTR-119
|
4494731fdf69397b9701d6594b60f67326656558
|
[
"MIT"
] | null | null | null |
# Print a simple welcome message to the console.
greeting = "Hello World!"
print(greeting)
| 25.666667
| 54
| 0.766234
|
acff234ac7d43aa3e0f50309b131cf919d9acdee
| 322
|
py
|
Python
|
firebase/firestore-py/bin/signout.py
|
BraydenKO/RamLife
|
10c9bbb7338fbaf6c3d1c98bb2f559e6cc089ee6
|
[
"MIT"
] | 3
|
2021-10-03T11:37:11.000Z
|
2022-01-20T15:39:58.000Z
|
firebase/firestore-py/bin/signout.py
|
BraydenKO/RamLife
|
10c9bbb7338fbaf6c3d1c98bb2f559e6cc089ee6
|
[
"MIT"
] | 58
|
2020-03-10T18:48:52.000Z
|
2021-08-31T23:19:09.000Z
|
firebase/firestore-py/bin/signout.py
|
Ramaz-Upper-School/RamLife
|
5015c72f6e6dc53cd5dd37bd3f0f87caf40ec0c4
|
[
"MIT"
] | 8
|
2020-09-08T18:29:54.000Z
|
2021-04-20T23:11:50.000Z
|
from firebase_admin import delete_app
import lib.services as firebase
import lib.utils as utils
if __name__ == "__main__":
    utils.logger.info("Signing out all users...")
    # Revoke each user's tokens, forcing them to re-authenticate.
    for user in firebase.list_users():
        firebase.auth.revoke_token(user)
    # Tear down the Firebase app handle once all users are processed.
    delete_app(firebase.app)
    utils.logger.info("All users have been signed out")
| 26.833333
| 52
| 0.773292
|
acff237612c06a8cb2bf3dac37240e94e12ffef0
| 305
|
py
|
Python
|
data/multilingual/Latn.FAO/Serif_16/pdf_to_json_test_Latn.FAO_Serif_16.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | 1
|
2021-09-19T19:47:35.000Z
|
2021-09-19T19:47:35.000Z
|
data/multilingual/Latn.FAO/Serif_16/pdf_to_json_test_Latn.FAO_Serif_16.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
data/multilingual/Latn.FAO/Serif_16/pdf_to_json_test_Latn.FAO_Serif_16.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
import pdf_to_json as p2j
import json
# Input: the UDHR sample PDF for the Latn.FAO / Serif_16 script-font combination.
url = "file:data/multilingual/Latn.FAO/Serif_16/udhr_Latn.FAO_Serif_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
# Presumably stores image hashes instead of raw image data to keep the
# output small -- confirm against the pdf_to_json documentation.
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
# Pretty-print the conversion result as JSON with stable key order;
# ensure_ascii=False keeps non-ASCII (Faroese) text readable.
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.5
| 75
| 0.813115
|
acff24208a93b7f9fa274d1eccb02cbf3962a2b5
| 20,418
|
py
|
Python
|
src/scenic/syntax/veneer.py
|
shr-project/Scenic
|
e3ea4c426af7619b4308233fa5e7a33ebcf5dcb9
|
[
"BSD-3-Clause"
] | null | null | null |
src/scenic/syntax/veneer.py
|
shr-project/Scenic
|
e3ea4c426af7619b4308233fa5e7a33ebcf5dcb9
|
[
"BSD-3-Clause"
] | null | null | null |
src/scenic/syntax/veneer.py
|
shr-project/Scenic
|
e3ea4c426af7619b4308233fa5e7a33ebcf5dcb9
|
[
"BSD-3-Clause"
] | null | null | null |
"""Veneer library, with Python implementations of Scenic language constructs.
This module is automatically imported by all Scenic programs. In addition to
defining the built-in functions, operators, specifiers, etc., it also stores
global state such as the list of all created Scenic objects.
"""
# Names exported to every Scenic program (this module is automatically
# imported by all Scenic programs; see the module docstring).
__all__ = (
    # Primitive statements and functions
    'ego', 'require', 'resample', 'param', 'mutate', 'verbosePrint',
    'sin', 'cos', 'hypot', 'max', 'min',
    # Prefix operators
    'Visible',
    'Front', 'Back', 'Left', 'Right',
    'FrontLeft', 'FrontRight', 'BackLeft', 'BackRight',
    # Infix operators
    'FieldAt', 'RelativeTo', 'OffsetAlong', 'RelativePosition',
    'RelativeHeading', 'ApparentHeading',
    'DistanceFrom', 'AngleTo', 'AngleFrom', 'Follow', 'CanSee',
    # Primitive types
    'Vector', 'VectorField', 'PolygonalVectorField',
    'Region', 'PointSetRegion', 'RectangularRegion', 'PolygonalRegion', 'PolylineRegion',
    'Workspace', 'Mutator',
    'Range', 'Options', 'Uniform', 'Normal',
    # Constructible types
    'Point', 'OrientedPoint', 'Object',
    # Specifiers
    'With',
    'At', 'In', 'Beyond', 'VisibleFrom', 'VisibleSpec', 'OffsetBy', 'OffsetAlongSpec',
    'Facing', 'FacingToward', 'ApparentlyFacing',
    'LeftSpec', 'RightSpec', 'Ahead', 'Behind',
    # Constants
    'everywhere', 'nowhere',
    # Temporary stuff... # TODO remove
    'PropertyDefault'
)
# various Python types and functions used in the language but defined elsewhere
from scenic.core.geometry import sin, cos, hypot, max, min
from scenic.core.vectors import Vector, VectorField, PolygonalVectorField
from scenic.core.regions import (Region, PointSetRegion, RectangularRegion,
PolygonalRegion, PolylineRegion, everywhere, nowhere)
from scenic.core.workspaces import Workspace
from scenic.core.distributions import Range, Options, Normal
Uniform = lambda *opts: Options(opts) # TODO separate these?
from scenic.core.object_types import Mutator, Point, OrientedPoint, Object
from scenic.core.specifiers import PropertyDefault # TODO remove
# everything that should not be directly accessible from the language is imported here:
import inspect
from scenic.core.distributions import Distribution, toDistribution
from scenic.core.type_support import isA, toType, toTypes, toScalar, toHeading, toVector
from scenic.core.type_support import evaluateRequiringEqualTypes, underlyingType
from scenic.core.geometry import RotatedRectangle, normalizeAngle, apparentHeadingAtPoint
from scenic.core.object_types import Constructible
from scenic.core.specifiers import Specifier
from scenic.core.lazy_eval import DelayedArgument
from scenic.core.utils import RuntimeParseError
from scenic.core.external_params import ExternalParameter
### Internals

# Nesting depth of Scenic module compilation (0 = not currently compiling).
activity = 0
# True while a deferred requirement is being evaluated during sampling.
evaluatingRequirement = False
allObjects = []    # ordered for reproducibility
# Object currently bound to the 'ego' pseudo-variable, if any.
egoObject = None
# Values set by 'param' statements.
globalParameters = {}
externalParameters = []    # ordered for reproducibility
# Deferred 'require' statements, keyed by requirement ID.
pendingRequirements = {}
inheritedReqs = []    # TODO improve handling of these?
def isActive():
    """Return True while at least one Scenic module is being compiled.

    Nested Scenic imports push 'activity' above 1, so any nonzero value
    (it is asserted nonnegative elsewhere) means compilation is in progress.
    """
    return activity != 0
def activate():
    """Mark the start of compiling a Scenic module.

    Bumps the nesting counter; must not be called while a deferred
    requirement is being evaluated.
    """
    global activity
    activity = activity + 1
    assert not evaluatingRequirement
def deactivate():
    """Deactivate the veneer after compiling a Scenic module.

    Decrements the nesting counter and resets all global compilation state
    so the next top-level compilation starts fresh.
    """
    global activity, allObjects, egoObject, globalParameters
    global externalParameters, pendingRequirements, inheritedReqs
    activity -= 1
    assert activity >= 0
    assert not evaluatingRequirement
    allObjects = []
    egoObject = None
    globalParameters = {}
    # fixed: 'externalParameters' was missing from the global declarations, so
    # this assignment created a dead local and module state leaked across
    # compilations
    externalParameters = []
    pendingRequirements = {}
    inheritedReqs = []
def registerObject(obj):
    """Record a newly created Scenic object (called by the Object constructor).

    During module compilation the object is appended to the global list;
    creating an object while evaluating a requirement is an error, and
    outside both contexts this is a no-op.
    """
    if activity <= 0:
        if evaluatingRequirement:
            raise RuntimeParseError('tried to create an object inside a requirement')
        return
    assert not evaluatingRequirement
    assert isinstance(obj, Constructible)
    allObjects.append(obj)
def registerExternalParameter(value):
    """Register a parameter whose value is given by an external sampler.

    Only takes effect while a Scenic module is being compiled.
    """
    if activity > 0:
        assert isinstance(value, ExternalParameter)
        externalParameters.append(value)
### Primitive statements and functions
def ego(obj=None):
    """Load or store the 'ego' pseudo-variable.

    The translator calls this with no arguments for loads and with the new
    value for stores. Raises RuntimeParseError on a load before any store,
    or on a store of a non-Object value.
    """
    global egoObject
    if obj is not None:
        if not isinstance(obj, Object):
            raise RuntimeParseError('tried to make non-object the ego object')
        egoObject = obj
    elif egoObject is None:
        raise RuntimeParseError('referred to ego object not yet assigned')
    return egoObject
def require(reqID, req, line, prob=1):
    """Function implementing the require statement.

    The translator wraps the requirement expression in a lambda so it is not
    evaluated here; we capture the current bindings of every name it uses
    (plus the ego object, which may be referenced implicitly) so it can be
    evaluated later, during scenario sampling.
    """
    if evaluatingRequirement:
        raise RuntimeParseError('tried to create a requirement inside a requirement')
    assert reqID not in pendingRequirements
    bindings = getAllGlobals(req)
    pendingRequirements[reqID] = (req, bindings, egoObject, line, prob)
def getAllGlobals(req, restrictTo=None):
    """Find all names the given lambda depends on, with their current bindings.

    Recurses into functions found among the globals, but only within the
    same module namespace (restrictTo), so bindings from other modules are
    not pulled in.
    """
    namespace = req.__globals__
    if restrictTo is not None and restrictTo is not namespace:
        return {}
    externals = inspect.getclosurevars(req)
    assert not externals.nonlocals    # TODO handle these
    bindings = dict(externals.builtins)
    for name, value in externals.globals.items():
        bindings[name] = value
        if inspect.isfunction(value):
            for subname, subvalue in getAllGlobals(value, restrictTo=namespace).items():
                if subname in bindings:
                    # a name reachable two ways must resolve identically
                    assert subvalue is bindings[subname]
                else:
                    bindings[subname] = subvalue
    return bindings
def resample(dist):
    """The built-in resample function.

    Returns an independent copy of a Distribution; any other value is
    passed through unchanged.
    """
    if isinstance(dist, Distribution):
        return dist.clone()
    return dist
def verbosePrint(msg):
    """Built-in function printing a message when the verbosity is >0.

    At verbosity >= 2 the message is indented in proportion to the current
    module-import nesting depth.
    """
    # imported here (not at module top) to avoid a circular import with the translator
    import scenic.syntax.translator as translator
    if translator.verbosity >= 1:
        indent = '  ' * activity if translator.verbosity >= 2 else '  '
        print(indent + msg)
def param(*quotedParams, **params):
    """Function implementing the param statement.

    Parameters arrive either as ordinary keyword arguments or, for names
    that are not valid Python identifiers, as alternating name/value
    positional pairs quoted by the translator.
    """
    if evaluatingRequirement:
        raise RuntimeParseError('tried to create a global parameter inside a requirement')
    for name, value in params.items():
        globalParameters[name] = toDistribution(value)
    assert len(quotedParams) % 2 == 0, quotedParams
    pairs = iter(quotedParams)
    for name, value in zip(pairs, pairs):
        globalParameters[name] = toDistribution(value)
def mutate(*objects):    # TODO update syntax
    """Function implementing the mutate statement.

    With no arguments, enables mutation on every object created so far.
    """
    if evaluatingRequirement:
        raise RuntimeParseError('used mutate statement inside a requirement')
    targets = objects if objects else allObjects
    for obj in targets:
        if not isinstance(obj, Object):
            raise RuntimeParseError('"mutate X" with X not an object')
        obj.mutationEnabled = True
### Prefix operators
def Visible(region):
    """The 'visible <region>' operator.

    Restricts the given region to the part visible from the ego object.
    """
    if isinstance(region, Region):
        return region.intersect(ego().visibleRegion)
    raise RuntimeParseError('"visible X" with X not a Region')
# front of <object>, etc.
ops = (
'front', 'back', 'left', 'right',
'front left', 'front right',
'back left', 'back right'
)
template = '''\
def {function}(X):
"""The '{syntax} of <object>' operator."""
if not isinstance(X, Object):
raise RuntimeParseError('"{syntax} of X" with X not an Object')
return X.{property}
'''
for op in ops:
func = ''.join(word.capitalize() for word in op.split(' '))
prop = func[0].lower() + func[1:]
definition = template.format(function=func, syntax=op, property=prop)
exec(definition)
### Infix operators
def FieldAt(X, Y):
    """The '<VectorField> at <vector>' operator.

    Samples the field X at the point Y.
    """
    if not isinstance(X, VectorField):
        raise RuntimeParseError('"X at Y" with X not a vector field')
    point = toVector(Y, '"X at Y" with Y not a vector')
    return X[point]
def RelativeTo(X, Y):
    """The 'X relative to Y' polymorphic operator.

    Allowed forms:
        F relative to G (with at least one a field, the other a field or heading)
        <vector> relative to <oriented point> (and vice versa)
        <vector> relative to <vector>
        <heading> relative to <heading>

    When a vector field is involved, the result depends on the eventual
    position of the object being specified, so a DelayedArgument is
    returned instead of a value.
    """
    xf, yf = isA(X, VectorField), isA(Y, VectorField)
    if xf or yf:
        if xf and yf and X.valueType != Y.valueType:
            raise RuntimeParseError('"X relative to Y" with X, Y fields of different types')
        fieldType = X.valueType if xf else Y.valueType
        error = '"X relative to Y" with field and value of different types'
        def helper(context):
            # sample each field at the object's eventual position; coerce a
            # plain (non-field) operand to the field's value type
            pos = context.position.toVector()
            xp = X[pos] if xf else toType(X, fieldType, error)
            yp = Y[pos] if yf else toType(Y, fieldType, error)
            return xp + yp
        return DelayedArgument({'position'}, helper)
    else:
        if isinstance(X, OrientedPoint):    # TODO too strict?
            if isinstance(Y, OrientedPoint):
                raise RuntimeParseError('"X relative to Y" with X, Y both oriented points')
            Y = toVector(Y, '"X relative to Y" with X an oriented point but Y not a vector')
            return X.relativize(Y)
        elif isinstance(Y, OrientedPoint):
            X = toVector(X, '"X relative to Y" with Y an oriented point but X not a vector')
            return Y.relativize(X)
        else:
            # vector+vector or scalar+scalar; mixing the two is an error
            X = toTypes(X, (Vector, float), '"X relative to Y" with X neither a vector nor scalar')
            Y = toTypes(Y, (Vector, float), '"X relative to Y" with Y neither a vector nor scalar')
            return evaluateRequiringEqualTypes(lambda: X + Y, X, Y,
                                               '"X relative to Y" with vector and scalar')
def OffsetAlong(X, H, Y):
    """The 'X offset along H by Y' polymorphic operator.

    Allowed forms:
        <vector> offset along <heading> by <vector>
        <vector> offset along <field> by <vector>

    A vector field H is sampled at X to obtain the heading.
    """
    base = toVector(X, '"X offset along H by Y" with X not a vector')
    offset = toVector(Y, '"X offset along H by Y" with Y not a vector')
    heading = H[base] if isinstance(H, VectorField) else H
    heading = toHeading(heading, '"X offset along H by Y" with H not a heading or vector field')
    return base.offsetRotated(heading, offset)
def RelativePosition(X, Y=None):
    """The 'relative position of <vector> [from <vector>]' operator.

    If the 'from <vector>' is omitted, the position of ego is used.
    """
    pos = toVector(X, '"relative position of X from Y" with X not a vector')
    ref = ego() if Y is None else Y
    ref = toVector(ref, '"relative position of X from Y" with Y not a vector')
    return pos - ref
def RelativeHeading(X, Y=None):
    """The 'relative heading of <heading> [from <heading>]' operator.

    If the 'from <heading>' is omitted, the heading of ego is used.
    The result is normalized to (-pi, pi].
    """
    h = toHeading(X, '"relative heading of X from Y" with X not a heading')
    if Y is None:
        ref = ego().heading
    else:
        ref = toHeading(Y, '"relative heading of X from Y" with Y not a heading')
    return normalizeAngle(h - ref)
def ApparentHeading(X, Y=None):
    """The 'apparent heading of <oriented point> [from <vector>]' operator.

    If the 'from <vector>' is omitted, the position of ego is used.
    """
    if not isinstance(X, OrientedPoint):
        raise RuntimeParseError('"apparent heading of X from Y" with X not an OrientedPoint')
    if Y is None:
        Y = ego()
    # fixed: error message previously said "relative heading" (copy-paste
    # from RelativeHeading), misreporting which operator failed
    Y = toVector(Y, '"apparent heading of X from Y" with Y not a vector')
    return apparentHeadingAtPoint(X.position, X.heading, Y)
def DistanceFrom(X, Y=None):
    """The 'distance from <vector> [to <vector>]' operator.

    If the 'to <vector>' is omitted, the position of ego is used.
    """
    start = toVector(X, '"distance from X to Y" with X not a vector')
    target = ego() if Y is None else Y
    target = toVector(target, '"distance from X to Y" with Y not a vector')
    return start.distanceTo(target)
def AngleTo(X):
    """The 'angle to <vector>' operator (measured from the position of ego)."""
    target = toVector(X, '"angle to X" with X not a vector')
    return ego().angleTo(target)
def AngleFrom(X, Y):
    """The 'angle from <vector> to <vector>' operator."""
    origin = toVector(X, '"angle from X to Y" with X not a vector')
    target = toVector(Y, '"angle from X to Y" with Y not a vector')
    return origin.angleTo(target)
def Follow(F, X, D):
    """The 'follow <field> from <vector> for <number>' operator.

    Follows the field F from point X for distance D, returning an
    OrientedPoint at the endpoint, facing along the field there.
    """
    if not isinstance(F, VectorField):
        raise RuntimeParseError('"follow F from X for D" with F not a vector field')
    start = toVector(X, '"follow F from X for D" with X not a vector')
    dist = toScalar(D, '"follow F from X for D" with D not a number')
    endPoint = F.followFrom(start, dist)
    return OrientedPoint(position=endPoint, heading=F[endPoint])
def CanSee(X, Y):
    """The 'X can see Y' polymorphic operator.

    Allowed forms:
        <point> can see <object>
        <point> can see <vector>
    """
    if not isinstance(X, Point):
        raise RuntimeParseError('"X can see Y" with X not a Point')
    if isinstance(Y, Point):
        return X.canSee(Y)
    target = toVector(Y, '"X can see Y" with Y not a vector')
    return X.visibleRegion.containsPoint(target)
### Specifiers
def With(prop, val):
    """The 'with <property> <value>' specifier.

    Specifies the named property directly, with no dependencies.
    """
    return Specifier(prop, val)
def At(pos):
    """The 'at <vector>' specifier.

    Specifies 'position', with no dependencies.
    """
    vec = toVector(pos, 'specifier "at X" with X not a vector')
    return Specifier('position', vec)
def In(region):
    """The 'in/on <region>' specifier.

    Specifies 'position' (uniformly at random within the region), with no
    dependencies. Optionally specifies 'heading' if the given Region has a
    preferred orientation.
    """
    region = toType(region, Region, 'specifier "in/on R" with R not a Region')
    # fixed: the else branch produced {} (an empty dict) rather than a set,
    # inconsistent with the {'heading'} branch and with leftSpecHelper
    extras = {'heading'} if alwaysProvidesOrientation(region) else set()
    return Specifier('position', Region.uniformPointIn(region), optionals=extras)
def alwaysProvidesOrientation(region):
    """Whether a Region or distribution over Regions always has an orientation."""
    if isinstance(region, Region):
        return region.orientation is not None
    if isinstance(region, Options):
        # a distribution qualifies only if every option does
        return all(map(alwaysProvidesOrientation, region.options))
    return False
def Beyond(pos, offset, fromPt=None):
    """The 'beyond X by Y [from Z]' polymorphic specifier.

    Specifies 'position', with no dependencies.
    Allowed forms:
        beyond <vector> by <number> [from <vector>]
        beyond <vector> by <vector> [from <vector>]
    If the 'from <vector>' is omitted, the position of ego is used.
    """
    pos = toVector(pos, 'specifier "beyond X by Y" with X not a vector')
    offsetType = underlyingType(offset)
    if offsetType is float or offsetType is int:
        # a scalar means 'directly beyond', i.e. along the line of sight
        offset = Vector(0, offset)
    elif offsetType is not Vector:
        raise RuntimeParseError('specifier "beyond X by Y" with Y not a number or vector')
    refPt = ego() if fromPt is None else fromPt
    refPt = toVector(refPt, 'specifier "beyond X by Y from Z" with Z not a vector')
    lineOfSight = refPt.angleTo(pos)
    return Specifier('position', pos.offsetRotated(lineOfSight, offset))
def VisibleFrom(base):
    """The 'visible from <Point>' specifier.

    Specifies 'position' (uniformly at random within the base's visible
    region), with no dependencies. Using 'visibleRegion' correctly handles
    the view regions of Points, OrientedPoints, and Objects.
    """
    if isinstance(base, Point):
        return Specifier('position', Region.uniformPointIn(base.visibleRegion))
    raise RuntimeParseError('specifier "visible from O" with O not a Point')
def VisibleSpec():
    """The 'visible' specifier — shorthand for 'visible from ego'.

    Specifies 'position', with no dependencies.
    """
    return VisibleFrom(ego())
def OffsetBy(offset):
    """The 'offset by <vector>' specifier.

    Specifies 'position' as the given offset in ego's local frame, with no
    dependencies.
    """
    vec = toVector(offset, 'specifier "offset by X" with X not a vector')
    return Specifier('position', RelativeTo(vec, ego()).toVector())
def OffsetAlongSpec(direction, offset):
    """The 'offset along X by Y' polymorphic specifier.

    Specifies 'position', with no dependencies.
    Allowed forms:
        offset along <heading> by <vector>
        offset along <field> by <vector>
    """
    position = OffsetAlong(ego(), direction, offset)
    return Specifier('position', position)
def Facing(heading):
    """The 'facing X' polymorphic specifier.

    Specifies 'heading', with dependencies depending on the form:
        facing <number> -- no dependencies;
        facing <field>  -- depends on 'position' (field sampled there).
    """
    if isinstance(heading, VectorField):
        value = DelayedArgument({'position'}, lambda self: heading[self.position])
        return Specifier('heading', value)
    fixed = toHeading(heading, 'specifier "facing X" with X not a heading or vector field')
    return Specifier('heading', fixed)
def FacingToward(pos):
    """The 'facing toward <vector>' specifier.

    Specifies 'heading', depending on 'position'.
    """
    target = toVector(pos, 'specifier "facing toward X" with X not a vector')
    headingAt = lambda self: self.position.angleTo(target)
    return Specifier('heading', DelayedArgument({'position'}, headingAt))
def ApparentlyFacing(heading, fromPt=None):
    """The 'apparently facing <heading> [from <vector>]' specifier.

    Specifies 'heading', depending on 'position'.
    If the 'from <vector>' is omitted, the position of ego is used.
    """
    heading = toHeading(heading, 'specifier "apparently facing X" with X not a heading')
    refPt = ego() if fromPt is None else fromPt
    refPt = toVector(refPt, 'specifier "apparently facing X from Y" with Y not a vector')
    headingAt = lambda self: refPt.angleTo(self.position) + heading
    return Specifier('heading', DelayedArgument({'position'}, headingAt))
def LeftSpec(pos, dist=0):
    """The 'left of X [by Y]' polymorphic specifier.

    Specifies 'position', depending on 'width'. See other dependencies below.
    Allowed forms:
        left of <oriented point> [by <scalar/vector>] -- optionally specifies 'heading';
        left of <vector> [by <scalar/vector>] -- depends on 'heading'.
    If the 'by <scalar/vector>' is omitted, zero is used.
    """
    toComponents = lambda dist: (dist, 0)
    makeOffset = lambda self, dx, dy: Vector(-self.width / 2 - dx, dy)
    return leftSpecHelper('left of', pos, dist, 'width', toComponents, makeOffset)
def RightSpec(pos, dist=0):
    """The 'right of X [by Y]' polymorphic specifier.

    Specifies 'position', depending on 'width'. See other dependencies below.
    Allowed forms:
        right of <oriented point> [by <scalar/vector>] -- optionally specifies 'heading';
        right of <vector> [by <scalar/vector>] -- depends on 'heading'.
    If the 'by <scalar/vector>' is omitted, zero is used.
    """
    toComponents = lambda dist: (dist, 0)
    makeOffset = lambda self, dx, dy: Vector(self.width / 2 + dx, dy)
    return leftSpecHelper('right of', pos, dist, 'width', toComponents, makeOffset)
def Ahead(pos, dist=0):
    """The 'ahead of X [by Y]' polymorphic specifier.

    Specifies 'position', depending on 'height'. See other dependencies below.
    Allowed forms:
        ahead of <oriented point> [by <scalar/vector>] -- optionally specifies 'heading';
        ahead of <vector> [by <scalar/vector>] -- depends on 'heading'.
    If the 'by <scalar/vector>' is omitted, zero is used.
    """
    toComponents = lambda dist: (0, dist)
    makeOffset = lambda self, dx, dy: Vector(dx, self.height / 2 + dy)
    return leftSpecHelper('ahead of', pos, dist, 'height', toComponents, makeOffset)
def Behind(pos, dist=0):
    """The 'behind X [by Y]' polymorphic specifier.

    Specifies 'position', depending on 'height'. See other dependencies below.
    Allowed forms:
        behind <oriented point> [by <scalar/vector>] -- optionally specifies 'heading';
        behind <vector> [by <scalar/vector>] -- depends on 'heading'.
    If the 'by <scalar/vector>' is omitted, zero is used.
    """
    toComponents = lambda dist: (0, dist)
    makeOffset = lambda self, dx, dy: Vector(dx, -self.height / 2 - dy)
    return leftSpecHelper('behind', pos, dist, 'height', toComponents, makeOffset)
def leftSpecHelper(syntax, pos, dist, axis, toComponents, makeOffset):
    """Common implementation of the left of/right of/ahead of/behind specifiers.

    Args:
        syntax: surface syntax of the specifier, for error messages (e.g. 'left of').
        pos: the reference OrientedPoint or vector.
        dist: scalar or vector displacement beyond the reference.
        toComponents: maps a scalar dist to (dx, dy) components.
        makeOffset: builds the local-frame offset vector from the object being
            specified (for its width/height) and the (dx, dy) components.
    """
    extras = set()
    dType = underlyingType(dist)
    if dType is float or dType is int:
        dx, dy = toComponents(dist)
    elif dType is Vector:
        dx, dy = dist
    else:
        raise RuntimeParseError(f'"{syntax} X by D" with D not a number or vector')
    if isinstance(pos, OrientedPoint): # TODO too strict?
        # the reference has its own heading: offset within its frame, and
        # optionally pass its heading along to the object being specified
        val = lambda self: pos.relativePosition(makeOffset(self, dx, dy))
        new = DelayedArgument({axis}, val)
        extras.add('heading')
    else:
        # plain vector reference: rotate the offset by the heading of the
        # object being specified, making 'heading' an extra dependency
        pos = toVector(pos, f'specifier "{syntax} X" with X not a vector')
        val = lambda self: pos.offsetRotated(self.heading, makeOffset(self, dx, dy))
        new = DelayedArgument({axis, 'heading'}, val)
    return Specifier('position', new, optionals=extras)
| 35.633508
| 91
| 0.710452
|
acff24c5e61d8912098de13f15fc800ccbcb1e14
| 37,571
|
py
|
Python
|
python/istio_api/mixer/v1/mixer_pb2.py
|
jasonwzm/istio-api-x
|
01ec7524898b1ce77e87341ce08a6ed79efcd98f
|
[
"Apache-2.0"
] | null | null | null |
python/istio_api/mixer/v1/mixer_pb2.py
|
jasonwzm/istio-api-x
|
01ec7524898b1ce77e87341ce08a6ed79efcd98f
|
[
"Apache-2.0"
] | null | null | null |
python/istio_api/mixer/v1/mixer_pb2.py
|
jasonwzm/istio-api-x
|
01ec7524898b1ce77e87341ce08a6ed79efcd98f
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mixer/v1/mixer.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
from mixer.v1 import attributes_pb2 as mixer_dot_v1_dot_attributes__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mixer/v1/mixer.proto',
package='istio.mixer.v1',
syntax='proto3',
serialized_options=_b('Z\025istio.io/api/mixer/v1\200\001\001\370\001\001\310\341\036\000\250\342\036\000\360\341\036\000'),
serialized_pb=_b('\n\x14mixer/v1/mixer.proto\x12\x0eistio.mixer.v1\x1a\x14gogoproto/gogo.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x17google/rpc/status.proto\x1a\x19mixer/v1/attributes.proto\"\xd0\x02\n\x0c\x43heckRequest\x12>\n\nattributes\x18\x01 \x01(\x0b\x32$.istio.mixer.v1.CompressedAttributesB\x04\xc8\xde\x1f\x00\x12\x19\n\x11global_word_count\x18\x02 \x01(\r\x12\x18\n\x10\x64\x65\x64uplication_id\x18\x03 \x01(\t\x12>\n\x06quotas\x18\x04 \x03(\x0b\x32(.istio.mixer.v1.CheckRequest.QuotasEntryB\x04\xc8\xde\x1f\x00\x1a\x32\n\x0bQuotaParams\x12\x0e\n\x06\x61mount\x18\x01 \x01(\x03\x12\x13\n\x0b\x62\x65st_effort\x18\x02 \x01(\x08\x1aW\n\x0bQuotasEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x37\n\x05value\x18\x02 \x01(\x0b\x32(.istio.mixer.v1.CheckRequest.QuotaParams:\x02\x38\x01\"\xed\x05\n\rCheckResponse\x12L\n\x0cprecondition\x18\x02 \x01(\x0b\x32\x30.istio.mixer.v1.CheckResponse.PreconditionResultB\x04\xc8\xde\x1f\x00\x12?\n\x06quotas\x18\x03 \x03(\x0b\x32).istio.mixer.v1.CheckResponse.QuotasEntryB\x04\xc8\xde\x1f\x00\x1a\x98\x02\n\x12PreconditionResult\x12(\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.StatusB\x04\xc8\xde\x1f\x00\x12;\n\x0evalid_duration\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\x08\xc8\xde\x1f\x00\x98\xdf\x1f\x01\x12\x17\n\x0fvalid_use_count\x18\x03 \x01(\x05\x12\x43\n\x15referenced_attributes\x18\x05 \x01(\x0b\x32$.istio.mixer.v1.ReferencedAttributes\x12\x37\n\x0froute_directive\x18\x06 \x01(\x0b\x32\x1e.istio.mixer.v1.RouteDirectiveJ\x04\x08\x04\x10\x05\x1a\xd7\x01\n\x0bQuotaResult\x12;\n\x0evalid_duration\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationB\x08\xc8\xde\x1f\x00\x98\xdf\x1f\x01\x12\x16\n\x0egranted_amount\x18\x02 \x01(\x03\x12(\n\x06status\x18\x06 \x01(\x0b\x32\x12.google.rpc.StatusB\x04\xc8\xde\x1f\x00\x12I\n\x15referenced_attributes\x18\x05 \x01(\x0b\x32$.istio.mixer.v1.ReferencedAttributesB\x04\xc8\xde\x1f\x00\x1aX\n\x0bQuotasEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x38\n\x05value\x18\x02 
\x01(\x0b\x32).istio.mixer.v1.CheckResponse.QuotaResult:\x02\x38\x01\"\xca\x02\n\x14ReferencedAttributes\x12\r\n\x05words\x18\x01 \x03(\t\x12T\n\x11\x61ttribute_matches\x18\x02 \x03(\x0b\x32\x33.istio.mixer.v1.ReferencedAttributes.AttributeMatchB\x04\xc8\xde\x1f\x00\x1a\x81\x01\n\x0e\x41ttributeMatch\x12\x0c\n\x04name\x18\x01 \x01(\x11\x12\x41\n\tcondition\x18\x02 \x01(\x0e\x32..istio.mixer.v1.ReferencedAttributes.Condition\x12\r\n\x05regex\x18\x03 \x01(\t\x12\x0f\n\x07map_key\x18\x04 \x01(\x11\"I\n\tCondition\x12\x19\n\x15\x43ONDITION_UNSPECIFIED\x10\x00\x12\x0b\n\x07\x41\x42SENCE\x10\x01\x12\t\n\x05\x45XACT\x10\x02\x12\t\n\x05REGEX\x10\x03\"\x9e\x01\n\x0fHeaderOperation\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x12<\n\toperation\x18\x03 \x01(\x0e\x32).istio.mixer.v1.HeaderOperation.Operation\"0\n\tOperation\x12\x0b\n\x07REPLACE\x10\x00\x12\n\n\x06REMOVE\x10\x01\x12\n\n\x06\x41PPEND\x10\x02\"\xe1\x01\n\x0eRouteDirective\x12H\n\x19request_header_operations\x18\x01 \x03(\x0b\x32\x1f.istio.mixer.v1.HeaderOperationB\x04\xc8\xde\x1f\x00\x12I\n\x1aresponse_header_operations\x18\x02 \x03(\x0b\x32\x1f.istio.mixer.v1.HeaderOperationB\x04\xc8\xde\x1f\x00\x12\x1c\n\x14\x64irect_response_code\x18\x03 \x01(\r\x12\x1c\n\x14\x64irect_response_body\x18\x04 \x01(\t\"\xb0\x02\n\rReportRequest\x12>\n\nattributes\x18\x01 \x03(\x0b\x32$.istio.mixer.v1.CompressedAttributesB\x04\xc8\xde\x1f\x00\x12`\n\x1drepeated_attributes_semantics\x18\x04 \x01(\x0e\x32\x39.istio.mixer.v1.ReportRequest.RepeatedAttributesSemantics\x12\x15\n\rdefault_words\x18\x02 \x03(\t\x12\x19\n\x11global_word_count\x18\x03 
\x01(\r\"K\n\x1bRepeatedAttributesSemantics\x12\x12\n\x0e\x44\x45LTA_ENCODING\x10\x00\x12\x18\n\x14INDEPENDENT_ENCODING\x10\x01\"\x10\n\x0eReportResponse2\x9a\x01\n\x05Mixer\x12\x46\n\x05\x43heck\x12\x1c.istio.mixer.v1.CheckRequest\x1a\x1d.istio.mixer.v1.CheckResponse\"\x00\x12I\n\x06Report\x12\x1d.istio.mixer.v1.ReportRequest\x1a\x1e.istio.mixer.v1.ReportResponse\"\x00\x42)Z\x15istio.io/api/mixer/v1\x80\x01\x01\xf8\x01\x01\xc8\xe1\x1e\x00\xa8\xe2\x1e\x00\xf0\xe1\x1e\x00\x62\x06proto3')
,
dependencies=[gogoproto_dot_gogo__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,mixer_dot_v1_dot_attributes__pb2.DESCRIPTOR,])
_REFERENCEDATTRIBUTES_CONDITION = _descriptor.EnumDescriptor(
name='Condition',
full_name='istio.mixer.v1.ReferencedAttributes.Condition',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='CONDITION_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ABSENCE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EXACT', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REGEX', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1495,
serialized_end=1568,
)
_sym_db.RegisterEnumDescriptor(_REFERENCEDATTRIBUTES_CONDITION)
_HEADEROPERATION_OPERATION = _descriptor.EnumDescriptor(
name='Operation',
full_name='istio.mixer.v1.HeaderOperation.Operation',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='REPLACE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REMOVE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='APPEND', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1681,
serialized_end=1729,
)
_sym_db.RegisterEnumDescriptor(_HEADEROPERATION_OPERATION)
_REPORTREQUEST_REPEATEDATTRIBUTESSEMANTICS = _descriptor.EnumDescriptor(
name='RepeatedAttributesSemantics',
full_name='istio.mixer.v1.ReportRequest.RepeatedAttributesSemantics',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DELTA_ENCODING', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INDEPENDENT_ENCODING', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2189,
serialized_end=2264,
)
_sym_db.RegisterEnumDescriptor(_REPORTREQUEST_REPEATEDATTRIBUTESSEMANTICS)
_CHECKREQUEST_QUOTAPARAMS = _descriptor.Descriptor(
name='QuotaParams',
full_name='istio.mixer.v1.CheckRequest.QuotaParams',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='amount', full_name='istio.mixer.v1.CheckRequest.QuotaParams.amount', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='best_effort', full_name='istio.mixer.v1.CheckRequest.QuotaParams.best_effort', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=344,
serialized_end=394,
)
_CHECKREQUEST_QUOTASENTRY = _descriptor.Descriptor(
name='QuotasEntry',
full_name='istio.mixer.v1.CheckRequest.QuotasEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.mixer.v1.CheckRequest.QuotasEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.mixer.v1.CheckRequest.QuotasEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=396,
serialized_end=483,
)
_CHECKREQUEST = _descriptor.Descriptor(
name='CheckRequest',
full_name='istio.mixer.v1.CheckRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='attributes', full_name='istio.mixer.v1.CheckRequest.attributes', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\310\336\037\000'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='global_word_count', full_name='istio.mixer.v1.CheckRequest.global_word_count', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deduplication_id', full_name='istio.mixer.v1.CheckRequest.deduplication_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quotas', full_name='istio.mixer.v1.CheckRequest.quotas', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\310\336\037\000'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CHECKREQUEST_QUOTAPARAMS, _CHECKREQUEST_QUOTASENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=147,
serialized_end=483,
)
_CHECKRESPONSE_PRECONDITIONRESULT = _descriptor.Descriptor(
name='PreconditionResult',
full_name='istio.mixer.v1.CheckResponse.PreconditionResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='istio.mixer.v1.CheckResponse.PreconditionResult.status', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\310\336\037\000'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='valid_duration', full_name='istio.mixer.v1.CheckResponse.PreconditionResult.valid_duration', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\310\336\037\000\230\337\037\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='valid_use_count', full_name='istio.mixer.v1.CheckResponse.PreconditionResult.valid_use_count', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='referenced_attributes', full_name='istio.mixer.v1.CheckResponse.PreconditionResult.referenced_attributes', index=3,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='route_directive', full_name='istio.mixer.v1.CheckResponse.PreconditionResult.route_directive', index=4,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=647,
serialized_end=927,
)
_CHECKRESPONSE_QUOTARESULT = _descriptor.Descriptor(
name='QuotaResult',
full_name='istio.mixer.v1.CheckResponse.QuotaResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='valid_duration', full_name='istio.mixer.v1.CheckResponse.QuotaResult.valid_duration', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\310\336\037\000\230\337\037\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='granted_amount', full_name='istio.mixer.v1.CheckResponse.QuotaResult.granted_amount', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='istio.mixer.v1.CheckResponse.QuotaResult.status', index=2,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\310\336\037\000'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='referenced_attributes', full_name='istio.mixer.v1.CheckResponse.QuotaResult.referenced_attributes', index=3,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\310\336\037\000'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=930,
serialized_end=1145,
)
_CHECKRESPONSE_QUOTASENTRY = _descriptor.Descriptor(
name='QuotasEntry',
full_name='istio.mixer.v1.CheckResponse.QuotasEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.mixer.v1.CheckResponse.QuotasEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.mixer.v1.CheckResponse.QuotasEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1147,
serialized_end=1235,
)
_CHECKRESPONSE = _descriptor.Descriptor(
name='CheckResponse',
full_name='istio.mixer.v1.CheckResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='precondition', full_name='istio.mixer.v1.CheckResponse.precondition', index=0,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\310\336\037\000'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quotas', full_name='istio.mixer.v1.CheckResponse.quotas', index=1,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\310\336\037\000'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CHECKRESPONSE_PRECONDITIONRESULT, _CHECKRESPONSE_QUOTARESULT, _CHECKRESPONSE_QUOTASENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=486,
serialized_end=1235,
)
_REFERENCEDATTRIBUTES_ATTRIBUTEMATCH = _descriptor.Descriptor(
name='AttributeMatch',
full_name='istio.mixer.v1.ReferencedAttributes.AttributeMatch',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='istio.mixer.v1.ReferencedAttributes.AttributeMatch.name', index=0,
number=1, type=17, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='condition', full_name='istio.mixer.v1.ReferencedAttributes.AttributeMatch.condition', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='regex', full_name='istio.mixer.v1.ReferencedAttributes.AttributeMatch.regex', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='map_key', full_name='istio.mixer.v1.ReferencedAttributes.AttributeMatch.map_key', index=3,
number=4, type=17, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1364,
serialized_end=1493,
)
_REFERENCEDATTRIBUTES = _descriptor.Descriptor(
name='ReferencedAttributes',
full_name='istio.mixer.v1.ReferencedAttributes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='words', full_name='istio.mixer.v1.ReferencedAttributes.words', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='attribute_matches', full_name='istio.mixer.v1.ReferencedAttributes.attribute_matches', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\310\336\037\000'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_REFERENCEDATTRIBUTES_ATTRIBUTEMATCH, ],
enum_types=[
_REFERENCEDATTRIBUTES_CONDITION,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1238,
serialized_end=1568,
)
_HEADEROPERATION = _descriptor.Descriptor(
name='HeaderOperation',
full_name='istio.mixer.v1.HeaderOperation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='istio.mixer.v1.HeaderOperation.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.mixer.v1.HeaderOperation.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operation', full_name='istio.mixer.v1.HeaderOperation.operation', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_HEADEROPERATION_OPERATION,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1571,
serialized_end=1729,
)
_ROUTEDIRECTIVE = _descriptor.Descriptor(
name='RouteDirective',
full_name='istio.mixer.v1.RouteDirective',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_header_operations', full_name='istio.mixer.v1.RouteDirective.request_header_operations', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\310\336\037\000'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='response_header_operations', full_name='istio.mixer.v1.RouteDirective.response_header_operations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\310\336\037\000'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='direct_response_code', full_name='istio.mixer.v1.RouteDirective.direct_response_code', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='direct_response_body', full_name='istio.mixer.v1.RouteDirective.direct_response_body', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1732,
serialized_end=1957,
)
_REPORTREQUEST = _descriptor.Descriptor(
name='ReportRequest',
full_name='istio.mixer.v1.ReportRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='attributes', full_name='istio.mixer.v1.ReportRequest.attributes', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\310\336\037\000'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='repeated_attributes_semantics', full_name='istio.mixer.v1.ReportRequest.repeated_attributes_semantics', index=1,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_words', full_name='istio.mixer.v1.ReportRequest.default_words', index=2,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='global_word_count', full_name='istio.mixer.v1.ReportRequest.global_word_count', index=3,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_REPORTREQUEST_REPEATEDATTRIBUTESSEMANTICS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1960,
serialized_end=2264,
)
_REPORTRESPONSE = _descriptor.Descriptor(
name='ReportResponse',
full_name='istio.mixer.v1.ReportResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2266,
serialized_end=2282,
)
_CHECKREQUEST_QUOTAPARAMS.containing_type = _CHECKREQUEST
_CHECKREQUEST_QUOTASENTRY.fields_by_name['value'].message_type = _CHECKREQUEST_QUOTAPARAMS
_CHECKREQUEST_QUOTASENTRY.containing_type = _CHECKREQUEST
_CHECKREQUEST.fields_by_name['attributes'].message_type = mixer_dot_v1_dot_attributes__pb2._COMPRESSEDATTRIBUTES
_CHECKREQUEST.fields_by_name['quotas'].message_type = _CHECKREQUEST_QUOTASENTRY
_CHECKRESPONSE_PRECONDITIONRESULT.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS
_CHECKRESPONSE_PRECONDITIONRESULT.fields_by_name['valid_duration'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_CHECKRESPONSE_PRECONDITIONRESULT.fields_by_name['referenced_attributes'].message_type = _REFERENCEDATTRIBUTES
_CHECKRESPONSE_PRECONDITIONRESULT.fields_by_name['route_directive'].message_type = _ROUTEDIRECTIVE
_CHECKRESPONSE_PRECONDITIONRESULT.containing_type = _CHECKRESPONSE
_CHECKRESPONSE_QUOTARESULT.fields_by_name['valid_duration'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_CHECKRESPONSE_QUOTARESULT.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS
_CHECKRESPONSE_QUOTARESULT.fields_by_name['referenced_attributes'].message_type = _REFERENCEDATTRIBUTES
_CHECKRESPONSE_QUOTARESULT.containing_type = _CHECKRESPONSE
_CHECKRESPONSE_QUOTASENTRY.fields_by_name['value'].message_type = _CHECKRESPONSE_QUOTARESULT
_CHECKRESPONSE_QUOTASENTRY.containing_type = _CHECKRESPONSE
_CHECKRESPONSE.fields_by_name['precondition'].message_type = _CHECKRESPONSE_PRECONDITIONRESULT
_CHECKRESPONSE.fields_by_name['quotas'].message_type = _CHECKRESPONSE_QUOTASENTRY
_REFERENCEDATTRIBUTES_ATTRIBUTEMATCH.fields_by_name['condition'].enum_type = _REFERENCEDATTRIBUTES_CONDITION
_REFERENCEDATTRIBUTES_ATTRIBUTEMATCH.containing_type = _REFERENCEDATTRIBUTES
_REFERENCEDATTRIBUTES.fields_by_name['attribute_matches'].message_type = _REFERENCEDATTRIBUTES_ATTRIBUTEMATCH
_REFERENCEDATTRIBUTES_CONDITION.containing_type = _REFERENCEDATTRIBUTES
_HEADEROPERATION.fields_by_name['operation'].enum_type = _HEADEROPERATION_OPERATION
_HEADEROPERATION_OPERATION.containing_type = _HEADEROPERATION
_ROUTEDIRECTIVE.fields_by_name['request_header_operations'].message_type = _HEADEROPERATION
_ROUTEDIRECTIVE.fields_by_name['response_header_operations'].message_type = _HEADEROPERATION
_REPORTREQUEST.fields_by_name['attributes'].message_type = mixer_dot_v1_dot_attributes__pb2._COMPRESSEDATTRIBUTES
_REPORTREQUEST.fields_by_name['repeated_attributes_semantics'].enum_type = _REPORTREQUEST_REPEATEDATTRIBUTESSEMANTICS
_REPORTREQUEST_REPEATEDATTRIBUTESSEMANTICS.containing_type = _REPORTREQUEST
DESCRIPTOR.message_types_by_name['CheckRequest'] = _CHECKREQUEST
DESCRIPTOR.message_types_by_name['CheckResponse'] = _CHECKRESPONSE
DESCRIPTOR.message_types_by_name['ReferencedAttributes'] = _REFERENCEDATTRIBUTES
DESCRIPTOR.message_types_by_name['HeaderOperation'] = _HEADEROPERATION
DESCRIPTOR.message_types_by_name['RouteDirective'] = _ROUTEDIRECTIVE
DESCRIPTOR.message_types_by_name['ReportRequest'] = _REPORTREQUEST
DESCRIPTOR.message_types_by_name['ReportResponse'] = _REPORTRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CheckRequest = _reflection.GeneratedProtocolMessageType('CheckRequest', (_message.Message,), dict(
QuotaParams = _reflection.GeneratedProtocolMessageType('QuotaParams', (_message.Message,), dict(
DESCRIPTOR = _CHECKREQUEST_QUOTAPARAMS,
__module__ = 'mixer.v1.mixer_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.CheckRequest.QuotaParams)
))
,
QuotasEntry = _reflection.GeneratedProtocolMessageType('QuotasEntry', (_message.Message,), dict(
DESCRIPTOR = _CHECKREQUEST_QUOTASENTRY,
__module__ = 'mixer.v1.mixer_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.CheckRequest.QuotasEntry)
))
,
DESCRIPTOR = _CHECKREQUEST,
__module__ = 'mixer.v1.mixer_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.CheckRequest)
))
_sym_db.RegisterMessage(CheckRequest)
_sym_db.RegisterMessage(CheckRequest.QuotaParams)
_sym_db.RegisterMessage(CheckRequest.QuotasEntry)
CheckResponse = _reflection.GeneratedProtocolMessageType('CheckResponse', (_message.Message,), dict(
PreconditionResult = _reflection.GeneratedProtocolMessageType('PreconditionResult', (_message.Message,), dict(
DESCRIPTOR = _CHECKRESPONSE_PRECONDITIONRESULT,
__module__ = 'mixer.v1.mixer_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.CheckResponse.PreconditionResult)
))
,
QuotaResult = _reflection.GeneratedProtocolMessageType('QuotaResult', (_message.Message,), dict(
DESCRIPTOR = _CHECKRESPONSE_QUOTARESULT,
__module__ = 'mixer.v1.mixer_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.CheckResponse.QuotaResult)
))
,
QuotasEntry = _reflection.GeneratedProtocolMessageType('QuotasEntry', (_message.Message,), dict(
DESCRIPTOR = _CHECKRESPONSE_QUOTASENTRY,
__module__ = 'mixer.v1.mixer_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.CheckResponse.QuotasEntry)
))
,
DESCRIPTOR = _CHECKRESPONSE,
__module__ = 'mixer.v1.mixer_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.CheckResponse)
))
_sym_db.RegisterMessage(CheckResponse)
_sym_db.RegisterMessage(CheckResponse.PreconditionResult)
_sym_db.RegisterMessage(CheckResponse.QuotaResult)
_sym_db.RegisterMessage(CheckResponse.QuotasEntry)
ReferencedAttributes = _reflection.GeneratedProtocolMessageType('ReferencedAttributes', (_message.Message,), dict(
AttributeMatch = _reflection.GeneratedProtocolMessageType('AttributeMatch', (_message.Message,), dict(
DESCRIPTOR = _REFERENCEDATTRIBUTES_ATTRIBUTEMATCH,
__module__ = 'mixer.v1.mixer_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.ReferencedAttributes.AttributeMatch)
))
,
DESCRIPTOR = _REFERENCEDATTRIBUTES,
__module__ = 'mixer.v1.mixer_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.ReferencedAttributes)
))
_sym_db.RegisterMessage(ReferencedAttributes)
_sym_db.RegisterMessage(ReferencedAttributes.AttributeMatch)
HeaderOperation = _reflection.GeneratedProtocolMessageType('HeaderOperation', (_message.Message,), dict(
DESCRIPTOR = _HEADEROPERATION,
__module__ = 'mixer.v1.mixer_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.HeaderOperation)
))
_sym_db.RegisterMessage(HeaderOperation)
RouteDirective = _reflection.GeneratedProtocolMessageType('RouteDirective', (_message.Message,), dict(
DESCRIPTOR = _ROUTEDIRECTIVE,
__module__ = 'mixer.v1.mixer_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.RouteDirective)
))
_sym_db.RegisterMessage(RouteDirective)
ReportRequest = _reflection.GeneratedProtocolMessageType('ReportRequest', (_message.Message,), dict(
DESCRIPTOR = _REPORTREQUEST,
__module__ = 'mixer.v1.mixer_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.ReportRequest)
))
_sym_db.RegisterMessage(ReportRequest)
ReportResponse = _reflection.GeneratedProtocolMessageType('ReportResponse', (_message.Message,), dict(
DESCRIPTOR = _REPORTRESPONSE,
__module__ = 'mixer.v1.mixer_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.ReportResponse)
))
_sym_db.RegisterMessage(ReportResponse)
DESCRIPTOR._options = None
_CHECKREQUEST_QUOTASENTRY._options = None
_CHECKREQUEST.fields_by_name['attributes']._options = None
_CHECKREQUEST.fields_by_name['quotas']._options = None
_CHECKRESPONSE_PRECONDITIONRESULT.fields_by_name['status']._options = None
_CHECKRESPONSE_PRECONDITIONRESULT.fields_by_name['valid_duration']._options = None
_CHECKRESPONSE_QUOTARESULT.fields_by_name['valid_duration']._options = None
_CHECKRESPONSE_QUOTARESULT.fields_by_name['status']._options = None
_CHECKRESPONSE_QUOTARESULT.fields_by_name['referenced_attributes']._options = None
_CHECKRESPONSE_QUOTASENTRY._options = None
_CHECKRESPONSE.fields_by_name['precondition']._options = None
_CHECKRESPONSE.fields_by_name['quotas']._options = None
_REFERENCEDATTRIBUTES.fields_by_name['attribute_matches']._options = None
_ROUTEDIRECTIVE.fields_by_name['request_header_operations']._options = None
_ROUTEDIRECTIVE.fields_by_name['response_header_operations']._options = None
_REPORTREQUEST.fields_by_name['attributes']._options = None
_MIXER = _descriptor.ServiceDescriptor(
name='Mixer',
full_name='istio.mixer.v1.Mixer',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=2285,
serialized_end=2439,
methods=[
_descriptor.MethodDescriptor(
name='Check',
full_name='istio.mixer.v1.Mixer.Check',
index=0,
containing_service=None,
input_type=_CHECKREQUEST,
output_type=_CHECKRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='Report',
full_name='istio.mixer.v1.Mixer.Report',
index=1,
containing_service=None,
input_type=_REPORTREQUEST,
output_type=_REPORTRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_MIXER)
DESCRIPTOR.services_by_name['Mixer'] = _MIXER
# @@protoc_insertion_point(module_scope)
| 43.135476
| 4,101
| 0.765244
|
acff251cbddd4e5f881343f166fe65b7f9d48f85
| 87,500
|
py
|
Python
|
my_server/dist/ba_root/mods/chatCmd.py
|
FireFighter1027/ba_1.6
|
8e0460c4d7228102726578fb4acd4ff081e12fc3
|
[
"MIT"
] | 6
|
2021-04-16T14:25:25.000Z
|
2021-11-18T17:20:19.000Z
|
my_server/dist/ba_root/mods/chatCmd.py
|
FireFighter1027/ba_1.6
|
8e0460c4d7228102726578fb4acd4ff081e12fc3
|
[
"MIT"
] | 1
|
2021-08-30T10:09:06.000Z
|
2021-09-21T10:44:15.000Z
|
my_server/dist/ba_root/mods/chatCmd.py
|
FireFighter1027/ba_1.6
|
8e0460c4d7228102726578fb4acd4ff081e12fc3
|
[
"MIT"
] | 2
|
2021-04-20T15:39:27.000Z
|
2021-07-18T08:45:56.000Z
|
# -*- coding: utf-8 -*-
# coding: utf-8
# ba_meta require api 6
import ba,_ba,random,os,json,roles,mysettings
from mysettings import *
from typing import List, Sequence, Optional, Dict, Any
# Module-level state shared between chatOptions.checkDevice() (which sets it)
# and chatOptions.opt() (which reads it); refreshed on every chat command.
reply = None  # acknowledgement message for the sender (None = nothing to say)
client_str = None  # display name of the client whose command is being processed
uniqueID = None  # account id of that client
commandByCoin = False  # True when the command was authorized via coin payment
commandSuccess = False  # True once a paid command actually executed
costOfCommand = None  # coin price of the command awaiting payment
def clientIdFromNick(nick):
    """Return the client id of the first roster entry whose name contains
    *nick* (case-insensitive), or None after announcing 'player not found'
    in chat.
    """
    client_id = None
    for entry in _ba.get_game_roster():
        # Prefer the full in-game player name; fall back to the device's
        # display string for clients still sitting in the lobby.
        if entry['players']:
            name = entry['players'][0]['name_full']
        else:
            name = entry['display_string']
        if nick.lower() in name.lower():
            # Player found -- go ahead.
            client_id = entry['client_id']
            break
    if client_id is None:  # PEP 8: identity test, not '== None'
        _ba.chatmessage('player not found')
    return client_id
def playerIdFromNick(nick):
    """Return the index (player id) of the first player in the foreground
    activity whose name contains *nick* (case-insensitive), or None after
    announcing 'player not found' in chat.
    """
    players = _ba.get_foreground_host_activity().players
    # enumerate() gives the index directly, avoiding the original's extra
    # O(n) players.index(player) lookup per match.
    for index, player in enumerate(players):
        if nick.lower() in player.getname().lower():
            return index
    _ba.chatmessage('player not found')
    return None
class chatOptions(object):
def __init__(self):
self.all = True
self.tint = None
def checkDevice(self, client_id: int, msg: str):
global reply
global clientID
global client_str
global uniqueID
global commandByCoin
global commandSuccess
global costOfCommand
clientID = client_id
#Check Commands.txt to check all types of cmds.
#Update The Below Lists for perfect Restrictions
publicCmd = ['/me', '/stats', '/rank', '/myself', '/id', '/list', '/', '/help']
adminDeny = ['/kickvote','/top','/setScore','/reset','/warn','/clearwarns','/whoinqueue','/text','/admin','/ban','/special','/partyname','/party','/pause','/setscreentextcolor', '/settextcolor', '/setscreencolor','/setchatcooldowntime', '/setchatcdtime', '/setchattime','/settings']
topperDeny = adminDeny + ['/mute','/unmute','/kick','/kickall','/remove','/shatter','/quit','/restartserver','/restart','/reflections','/floorreflection','/icy','/exchange','/vip','/maxPlayers','/say']
if settings['enableCoinSystem']:
import coinSystem
publicCmd = publicCmd + ['/donate','/buy','/shop','/scoretocash','/cashtoscore']
ros = _ba.get_game_roster()
for i in ros:
if (i is not None) and (i != {}):
if i['client_id'] == clientID:
client_str = i['players'][0]['name']
uniqueID = i['account_id']
cmd = msg.split(' ')[0]
ecs = settings['enableCoinSystem']
with ba.Context(_ba.get_foreground_host_activity()):
if cmd in publicCmd: return True
if (uniqueID in roles.owners):
if ecs : reply = u"\ue043O.W.N.E.R, Command Accepted\ue043"
else: reply = ":)"
return True
if (uniqueID in roles.admins) and (cmd not in adminDeny):
if ecs: reply = u"\ue043A.D.M.I.N, Command Accepted\ue043"
else: reply = ":)"
return True
if (uniqueID in roles.vips) and (cmd not in topperDeny):
if ecs: reply = u"\ue043V.I.P, Command Accepted\ue043"
else: reply = ":)"
return True
if (settings['enableTop5commands']) and (uniqueID in roles.toppersList) and (cmd not in topperDeny):
if ecs: reply = u"\ue043TOP 5 PLAYER, Command Accepted\ue043"
else: reply = ":)"
return True
if (uniqueID in roles.special) and (cmd in roles.special[uniqueID]):
reply = ":)"
return True
if ecs:
if cmd in availableCommands:
user_bal = coinSystem.getCoins(uniqueID)
costOfCommand = availableCommands[cmd]
if (user_bal >= cost_of_cmd):
commandByCoin = True
return True
else:
ba.screenmessage(f"You need {tic}{str(costOfCommand)} for that, You have {tic}{str(user_bal)} only.",color=(1,0,0),clients=[clientID],transient=True)
return False
else: return False
def kickByNick(self, nick: str):
roster = _ba.get_game_roster()
for i in roster:
try:
if i['players'][0]['name_full'].lower().find(nick.encode('utf-8').lower()) != -1:
_ba.disconnect_client(int(i['client_id']))
except:
pass
def opt(self, msg: str, clientID: int):
if settings['enableCoinSystem']:
import coinSystem
from datetime import datetime, timedelta
commandSuccess = False
ros = _ba.get_game_roster()
allUser = [int(u['client_id']) for u in ros]
for i in ros:
if (i is not None) and (i != {}):
if i['client_id'] == clientID:
client_str = i['players'][0]['name']
uniqueID = i['account_id']
#Main Base Variables
activity = _ba.get_foreground_host_activity()
session = _ba.get_foreground_host_session()
players = activity.players
splayers = session.sessionplayers
pID = playerIdFromNick(client_str)
if self.checkDevice(clientID,msg):
m = msg.split(' ')[0].lower()
a = msg.split(' ')[1:]
with ba.Context(activity):
###################### PUBLIC COMMANDS #########################
#HELP
if m == '/help':
if uniqueID in roles.owners:
thing = {}
for k,v in settings.items():
thing[k] = v
for k,v in powerups.items():
thing[k] = v
string = ''
separator = ' '
for x in thing:
string += f"{x}----{str(thing[x])}{separator}"
if separator == ' ': separator = '\n'
else: separator = ' '
ba.screenmessage(string, clients=[clientID], transient=True)
else:
ba.screenmessage(f"Use '/shop' to check what you can buy.",color=(1,0,0),clients=[clientID],transient=True)
#LIST
elif m == '/list':
#string = u'==Name========ClientID====PlayerID==\n'
string = u"{0:^16}{1:^15}{2:^10}\n------------------------------------------------------------------------------\n".format('Name','ClientID','PlayerID')
lname = None
lcid = None
lpid = None
for i in _ba.get_game_roster():
if i['players'] == []:
lname = str(i['display_string'])
lcid = str(i['client_id'])
lpid = str('In Lobby')
string += u"{0:^16}{1:^15}{2:^10}\n".format(lname, lcid, lpid)
else:
for lp in i['players']:
lname = lp['name_full']
lcid = i['client_id']
lpid = lp['id']
string += u"{0:^16}{1:^15}{2:^10}\n".format(lname, lcid, lpid)
ba.screenmessage(string, transient=True, color=(1, 1, 1), clients=[clientID])
#ID
elif m == '/id':
if a == []:
ba.screenmessage(f"Unique_ID of {client_str} => '{uniqueID}'", clients=[clientID], transient=True)
else:
#try:
if True:
for i in _ba.get_game_roster():
if str(i['client_id']) == a[0]:
admins = roles.owners + roles.admins
if True: # (uniqueID in admins):
_ba.chatmessage(f"Unique_ID of {str(i['display_string'])} => '{str(i['account_id'])}'")
'''except:
pass'''
#STATS
elif m in ('/me', '/stats', '/rank', '/myself'):
if enableStats: printStatsByID(uniqueID)
else: sendError(f"Stats Disabled !",clientID)
#DONATE
elif m == '/donate' and settings['enableCoinSystem']:
try:
if len(a) < 2: ba.screenmessage(f"Usage: /donate [amount] [clientID]", transient=True, clients=[clientID])
else:
transfer = int(a[0])
if transfer < 100:
sendError(f"You can only transfer more than {tic}100.",clientID)
return
sendersID = uniqueID
receiversID = None
for player in aplayers:
clID = player.inputdevice.get_client_id()
aid = player.get_account_id()
if clID == int(a[1]):
receiversID = aid
name = player.getname()
if None not in [sendersID, receiversID]:
if sendersID == receiversID: sendError('You can\'t transfer to your own account',clientID)
elif coinSystem.getCoins(sendersID) < transfer: _ba.chatmessage(f"Not enough {tic}s to perform transaction")
else:
coinSystem.addCoins(sendersID, int(transfer * -1))
coinSystem.addCoins(receiversID, int(transfer))
_ba.chatmessage(f"Successfully transfered {tic}{str(a[0])} to {name}'s account.")
else:
sendError('Player not Found in current game !',clientID)
except:
ba.screenmessage('Usage: /donate amount clientID', transient=True, clients=[clientID])
#BUY
elif m == '/buy' and settings['enableCoinSystem']:
if a == []:
_ba.chatmessage('Usage: /buy item_name')
elif a[0] in availableEffects:
effect = a[0]
costOfEffect = availableEffects[effect]
haveCoins = coinSystem.getCoins(uniqueID)
if haveCoins >= costOfEffect:
customers = roles.effectCustomers
if uniqueID not in customers:
expiry = datetime.now() + timedelta(days=1)
customers[uniqueID] = {'effect': effect, 'expiry': expiry.strftime('%d-%m-%Y %H:%M:%S')}
with open(python_path + '/roles.py') as (file):
s = [ row for row in file ]
s[0] = 'effectCustomers = ' + str(customers) + '\n'
f = open(python_path + '/roles.py', 'w')
for i in s:
f.write(i)
f.close()
coinSystem.addCoins(uniqueID, costOfEffect * -1)
_ba.chatmessage(f"Success! That cost you {tic}{str(costOfEffect)}")
else:
activeEffect = customers[uniqueID]['effect']
sendError(f"You already have {activeEffect} effect active",clientID)
else:
sendError(f"You need {tic}{str(costOfEffect)} for that, You have {tic}{str(haveCoins)} only.",clientID)
else: sendError(f"invalid item, try using '/shop effects'.",clientID)
#SHOP
elif m == '/shop' and settings['enableCoinSystem']:
string = '==You can buy following items==\n'
if a == []: ba.screenmessage('Usage: /shop commands or /shop effects', transient=True, clients=[clientID])
elif a[0].startswith('effects'):
for x in availableEffects:
string += f"{x}----{tic}{str(availableEffects[x])}----for 1 day\n"
ba.screenmessage(string, transient=True, color=(0, 1, 0), clients=[clientID])
elif a[0].startswith('commands'):
separator = ' '
for x in availableCommands:
string += f"{x}----{tic}{str(availableCommands[x])}{separator}"
if separator == ' ': separator = '\n'
else: separator = ' '
ba.screenmessage(string, transient=True, color=(0, 1, 0), clients=[clientID])
else: ba.screenmessage('Usage: /shop commands or /shop effects', transient=True, clients=[clientID])
#CASH TO SCORE
elif m == '/cashtoscore' and settings['enableCoinSystem']:
try:
coins = int(a[0])
haveCoins = coinSystem.getCoins(uniqueID)
if haveCoins < coins:
sendError(f"Not enough {tic}s to perform the transaction",clientID)
elif coins < 100:
sendError(f"You can only convert more than {tic}100",clientID)
else:
coinSystem.addCoins(uniqueID, coins * -1)
stats = getStats()
equivalentScore = int(coins * 5 * 0.9)
stats[uniqueID]['scores'] += equivalentScore
f = open(statsFile, 'w')
f.write(json.dumps(stats))
ba.screenmessage(f'Transaction Successful', color=(0,1,0))
f.close()
_ba.chatmessage(f"{str(equivalentScore)} scores added to your account stats. [10% transaction fee deducted]")
import mystats
mystats.refreshStats()
except:
ba.screenmessage('Usage: /cashtoscore amount_of_cash', transient=True, clients=[clientID])
#SCORE TO CASH
elif m == '/scoretocash' and settings['enableCoinSystem']:
try:
score = int(a[0])
stats = getStats()
haveScore = stats[uniqueID]['scores']
if haveScore < score:
sendError('Not enough scores to perform the transaction',clientID)
elif score < 500:
sendError('You can only convert more than 500 scores',clientID)
else:
f = open(statsFile, 'w')
stats[uniqueID]['scores'] -= score
f.write(json.dumps(stats))
equivalentCoins = int(score / 5 * 0.9)
coinSystem.addCoins(uniqueID, equivalentCoins)
ba.screenmessage('Transaction Successful', color=(0, 1, 0))
f.close()
_ba.chatmessage(f"{tic}{str(equivalentCoins)} added to your account. [10% transaction fee deducted]")
import mystats
mystats.refreshStats()
except:
ba.screenmessage('Usage: /scoretocash amount_of_score', transient=True, clients=[clientID])
################## COIN SYSTEM COMMANDS #####################
#NV
elif m == '/nv':
if self.tint is None:
self.tint = activity.globalsnode.tint
activity.globalsnode.tint = (0.5,0.7,1) if a == [] or not a[0] == u'off' else self.tint
commandSuccess=True
#OOH
elif m == '/ooh':
if a is not None and len(a) > 0:
s = int(a[0])
                            def oohRecurce(c: int) -> None:
                                # Play the 'ooh' sound, then re-schedule itself until the
                                # requested repeat count c is exhausted.
                                ba.playsound(ba.getsound('ooh'), volume =2)
                                c -= 1
                                if c > 0:
                                    # a[1] (optional) is the delay between repeats.
                                    # NOTE(review): the 1000 fallback looks like a
                                    # milliseconds default, but ba.Timer's first arg is
                                    # commonly seconds -- confirm the intended unit.
                                    ba.Timer(int(a[1]) if len(a) > 1 and a[1] is not None else 1000, ba.Call(oohRecurce, c=c))
                                    return
oohRecurce(c=s)
else:
ba.playsound(ba.getsound('ooh'), volume =2)
commandSuccess = True
#PLAYSOUND
elif m == '/playsound':
try:
if a is not None and len(a) > 1:
s = int(a[1])
                                def oohRecurce(c: int) -> None:
                                    # Play the sound named by a[0], then re-schedule
                                    # itself until the repeat count c is exhausted.
                                    ba.playsound(ba.getsound(str(a[0])), volume =2)
                                    c -= 1
                                    if c > 0:
                                        # a[2] (optional) is the delay between repeats.
                                        # NOTE(review): the 1000 fallback looks like a
                                        # milliseconds default, but ba.Timer's first arg
                                        # is commonly seconds -- confirm the unit.
                                        ba.Timer(int(a[2]) if len(a) > 2 and a[2] is not None else 1000, ba.Call(oohRecurce, c=c))
                                        return
oohRecurce(c=s)
else:
ba.playsound(ba.getsound(str(a[0])), volume =2)
commandSuccess = True
except:
ba.screenmessage('Usage: /playsound music times', transient=True, clients=[clientID])
#BOX
elif m == '/box':
try:
try:
if a != []:
n = int(a[0])
else:
n = pID
players[n].actor.node.torso_model = ba.getmodel("tnt");
players[n].actor.node.color_mask_texture = ba.gettexture("tnt");
players[n].actor.node.color_texture = ba.gettexture("tnt")
players[n].actor.node.highlight = (1,1,1);
players[n].actor.node.color = (1,1,1);
players[n].actor.node.head_model = None;
players[n].actor.node.style = "cyborg";
commandSuccess = True
except:
pass
except:
ba.screenmessage(f"Using: /boxall [or] /box [PlayerID]", transient=True, clients=[clientID])
#BOXALL
elif m == '/boxall':
try:
for i in players:
try:
i.actor.node.torso_model = ba.getmodel("tnt")
i.actor.node.color_mask_texture = ba.gettexture("tnt")
i.actor.node.color_texture = ba.gettexture("tnt")
i.actor.node.highlight = (1,1,1)
i.actor.node.color = (1,1,1)
i.actor.node.style = "cyborg"
commandSuccess = True
except:
pass
except:
pass
#SPAZ
elif m == '/spaz':
try:
try:
if a != []:
n = int(a[0])
else:
n = pID
t = players[n].actor.node
t.color_texture = ba.gettexture(a[1]+"Color")
t.color_mask_texture = ba.gettexture(a[1]+"ColorMask")
t.head_model = ba.getmodel(a[1]+"Head")
t.torso_model = ba.getmodel(a[1]+"Torso")
t.pelvis_model = ba.getmodel(a[1]+"Pelvis")
t.upper_arm_model = ba.getmodel(a[1]+"UpperArm")
t.forearm_model = ba.getmodel(a[1]+"ForeArm")
t.hand_model = ba.getmodel(a[1]+"Hand")
t.upper_leg_model = ba.getmodel(a[1]+"UpperLeg")
t.lower_leg_model = ba.getmodel(a[1]+"LowerLeg")
t.toes_model = ba.getmodel(a[1]+"Toes")
t.style = a[1]
commandSuccess=True
except:
pass
except:
ba.screenmessage(f"Using: /spazall [AppearanceName] [or] /spaz [PlayerID] [AppearanceName]", transient=True, clients=[clientID])
#SPAZALL
elif m == '/spazall':
for i in players:
t = i.actor.node
try:
t.color_texture = ba.gettexture(a[1]+"Color")
t.color_mask_texture = ba.gettexture(a[1]+"ColorMask")
t.head_model = ba.getmodel(a[1]+"Head")
t.torso_model = ba.getmodel(a[1]+"Torso")
t.pelvis_model = ba.getmodel(a[1]+"Pelvis")
t.upper_arm_model = ba.getmodel(a[1]+"UpperArm")
t.forearm_model = ba.getmodel(a[1]+"ForeArm")
t.hand_model = ba.getmodel(a[1]+"Hand")
t.upper_leg_model = ba.getmodel(a[1]+"UpperLeg")
t.lower_leg_model = ba.getmodel(a[1]+"LowerLeg")
t.toes_model = ba.getmodel(a[1]+"Toes")
t.style = a[1]
commandSuccess=True
except:
ba.screenmessage(f"Using: /spazall [AppearanceName] [or] /spaz [PlayerID] [AppearanceName]", transient=True, clients=[clientID])
#INV
elif m == '/inv':
try:
if True:
if a != []:
n = int(a[0])
else:
n = pID
t = players[n].actor.node
t.head_model = None
t.torso_model = None
t.pelvis_model = None
t.upper_arm_model = None
t.forearm_model = None
t.hand_model = None
t.upper_leg_model = None
t.lower_leg_model = None
t.toes_model = None
t.style = "cyborg"
commandSuccess=True
'''except:
pass'''
except:
ba.screenmessage(f"Using: /invall [or] /inv [PlayerID]", transient=True, clients=[clientID])
#INVALL
elif m == '/invall':
try:
for i in players:
t = i.actor.node
try:
t.head_model = None
t.torso_model = None
t.pelvis_model = None
t.upper_arm_model = None
t.forearm_model = None
t.hand_model = None
t.upper_leg_model = None
t.lower_leg_model = None
t.toes_model = None
t.style = "cyborg"
commandSuccess=True
except:
pass
except:
pass
#TEX
elif m == '/tex':
try:
if len(a) > 1: n = int(a[0])
else: n = pID
color = None
if (len(a) > 1) and (str(a[1]) == 'kronk'): color = str(a[1])
else:color = str(a[0]) + 'Color'
try:
players[n].actor.node.color_mask_texture= ba.gettexture(str(a[1]));
players[n].actor.node.color_texture= ba.gettexture(str(a[1]))
commandSuccess=True
except:
ba.screenmessage(f"Using: /texall [texture] [or] /tex [PlayerID] [texture]", transient=True, clients=[clientID])
except:
ba.screenmessage(f"Using: /texall [texture] [or] /tex [PlayerID] [texture]", transient=True, clients=[clientID])
#TEXALL
elif m == '/texall':
try:
color = None
if str(a[0]) == 'kronk':
color = str(a[0])
else:color = str(a[0]) + 'Color'
for i in players:
try:
i.actor.node.color_mask_texture= ba.gettexture(str(a[0]) + 'ColorMask')
i.actor.node.color_texture= ba.gettexture(color)
commandSuccess=True
except:
pass
except:
ba.screenmessage(f"Using: /texall [texture] [or] /tex [PlayerID] [texture]", transient=True, clients=[clientID])
#FREEZE
elif m == '/freeze':
if a == [] or a[0] == 'all':
ba.screenmessage(f"Using: /freezeall [or] /freeze [PlayerID]", transient=True, clients=[clientID])
else:
players[int(a[0])].actor.node.handlemessage(ba.FreezeMessage())
commandSuccess=True
#FREEZEALL
elif m == '/freezeall':
for i in players:
try:
i.actor.node.handlemessage(ba.FreezeMessage())
commandSuccess=True
except:
pass
#SLEEP
elif m == '/sleep':
if a == [] or a[0] == 'all':
ba.screenmessage(f"Using: /sleepall [or] /sleep [PlayerID] [secToSleep]", transient=True, clients=[clientID])
else:
players[int(a[0])].actor.node.handlemessage("knockout",int(a[1])*1000+1)
commandSuccess=True
#SLEEPALL
elif m == '/sleepall':
for i in players:
try:
i.actor.node.handlemessage("knockout",int(a[1])*1000+1)
commandSuccess=True
except:
pass
#THAW
elif m == '/thaw':
if a == [] or a[0] == 'all':
ba.screenmessage(f"Using: /thawall [or] /thaw[PlayerID]", transient=True, clients=[clientID])
else:
players[int(a[0])].actor.node.handlemessage(ba.ThawMessage())
commandSuccess=True
#THAWALL
elif m == '/thawall':
for i in players:
try:
i.actor.node.handlemessage(ba.ThawMessage())
commandSuccess=True
except:
pass
#KILL
elif m == '/kill':
if a == [] or a[0] == 'all':
ba.screenmessage(f"Using: /killall [or] /kill [PlayerID]", transient=True, clients=[clientID])
else:
players[int(a[0])].actor.node.handlemessage(ba.DieMessage())
commandSuccess=True
#KILLALL
elif m == '/killall':
for i in players:
try:
i.actor.node.handlemessage(ba.DieMessage())
commandSuccess=True
except:
pass
#END
elif m == '/end':
try:
_ba.get_foreground_host_activity().end_game()
commandSuccess=True
except:
pass
#HUG
elif m == '/hug':
if a == [] or a[0] == 'all':
ba.screenmessage(f"Using: /hugall [or] /hug [player1Index] [player2Index]", transient=True, clients=[clientID])
else:
try:
players[int(a[0])].actor.node.hold_node = players[int(a[1])].actor.node
commandSuccess=True
except:
pass
#HUGALL
elif m == '/hugall':
try:
players[0].actor.node.hold_node = players[1].actor.node
commandSuccess=True
except:
pass
try:
players[1].actor.node.hold_node = players[0].actor.node
commandSuccess=True
except:
pass
try:
players[2].actor.node.hold_node = players[3].actor.node
commandSuccess=True
except:
pass
try:
players[3].actor.node.hold_node = players[2].actor.node
commandSuccess=True
except:
pass
try:
players[4].actor.node.hold_node = players[5].actor.node
commandSuccess=True
except:
pass
try:
players[5].actor.node.hold_node = players[4].actor.node
commandSuccess=True
except:
pass
try:
players[6].actor.node.hold_node = players[7].actor.node
commandSuccess=True
except:
pass
try:
players[7].actor.node.hold_node = players[6].actor.node
commandSuccess=True
except:
pass
#SM
elif m == '/sm':
activity.globalsnode.slow_motion = activity.globalsnode.slow_motion == False
commandSuccess=True
#FLY
elif m == '/fly':
try:
try:
playerID = int(a[0])
except:
playerID = pID
players[playerID].actor.node.fly = players[playerID].actor.node.fly == False
commandSuccess = True
except:
bsInternal._chatMessage('Failed!!! Usage: /flyall or /fly number of list', transient=True, clients=[clientID])
#FLYALL
elif m == '/flyall':
for i in players:
i.actor.node.fly = i.actor.node.fly == False
commandSuccess = True
#CURSE
elif m == '/curse':
if a == [] or a[0] == 'all':
ba.screenmessage(f"Using: /curseall [or] /curse [PlayerID]", transient=True, clients=[clientID])
else:
players[int(a[0])].actor.curse()
commandSuccess=True
#CURSEALL
elif m == '/curseall':
for i in players:
try:
i.actor.curse()
except:
pass
commandSuccess=True
#HEAL
elif m == '/heal':
try:
if len(a) > 0: n = int(a[0])
else: n = pID
players[n].actor.node.handlemessage(ba.PowerupMessage('health'))
except:
ba.screenmessage(f"Using: /healall [or] /heal [PlayerID]", transient=True, clients=[clientID])
commandSuccess=True
#HEALALL
elif m == '/healall':
for i in players:
try:
i.actor.node.handlemessage(ba.PowerupMessage('health'))
except:
pass
commandSuccess=True
#CUSTOM
elif m == '/custom':
if True: #try:
clID = int(a[0])
ros = _ba.get_game_roster()
for i in ros:
if (i is not None) and (i != {}) and (i['client_id'] == clID):
name = i['players'][0]['name']
new = i['account_id']
a[2] = a[2].replace('_',' ')
commandSuccess = False
if a[1] == 'add':
if (len(a) > 3) and (not str(a[2]).startswith('perm')) and (new not in roles.customTag):
roles.customTag[new] = str(a[2])
n = 1
string = 'customTag'
updated = roles.customTag
commandSuccess=True
if (len(a) < 3) and (new not in roles.customList):
roles.customList.append(new)
n = 4
string = 'customList'
updated = roles.customList
commandSuccess=True
if a[1] == 'remove':
if (new in roles.customTag):
roles.customTag.pop(new)
n = 1
string = 'customTag'
updated = roles.customTag
commandSuccess=True
if (new in roles.customList):
roles.customList.remove(new)
n = 4
string = 'customList'
updated = roles.customList
commandSuccess=True
def save_data():
m = open(membersFile, 'r')
d = json.loads(m.read())
if string == 'customTag': d['customTag'] = roles.customTag
if string == 'customList': d['customList'] = roles.customList
m2 = open(membersFile, 'w')
m2.write(json.dumps(d, indent=4))
m2.close()
with open(python_path + '/roles.py') as (file):
s = [ row for row in file ]
s[n] = str(string) + ' = ' + str(updated) + '\n'
f = open(python_path + '/roles.py', 'w')
for i in s:
f.write(i)
f.close()
if commandByCoin and commandSuccess:
save_data()
exp = datetime.now() + timedelta(days=1)
cms = coinSystem.tag_customers
cms[newID] = {'expiry':exp, 'type':string, 'line':n}
with open(python_path + '/coinSystem.py') as (FILE):
line = [row for row in FILE]
line[0] = 'tag_customers = ' + str(cms) + '\n'
f = open(python_path + '/coinSystem.py', 'w')
for i in line:
f.write(i)
f.close()
if (uniqueID in roles.owners) and (commandSuccess == True):
if (str(a[2]).startswith('perm')) or (str(a[3]).startswith('perm')): save_data()
'''except:
ba.screenmessage(f"Using: /custom [ClientID] add/remove (Optional: TAG) (Optional: permanent)", clients=[clientID], transient=True)'''
#################### ADMIN COMMANDS ########################
#BUNNY
elif m == '/bunnyNotYetModded':
try:
"""
import BuddyBunny
for i in range(int(a[0])):
p=ba.getactivity().players[int(a[1])]
if not 'bunnies' in p.gameData:
p.gameData['bunnies'] = BuddyBunny.BunnyBotSet(p)
p.gameData['bunnies'].doBunny()
commandSuccess=True
"""
_ba.chatmessage("'/bunny' command removed !")
except:
_ba.chatmessage("'/bunny' command removed !")
#ba.screenmessage(f"Using: /bunny [count] [owner's_PlayerID]", transient=True, clients=[clientID])
#LOAD MESSAGES
elif m == '/lm':
arr = []
for i in range(len(_ba.get_chat_messages())):
if True: #try:
arr.append(_ba.get_chat_messages()[(-1 - i)])
'''except:
pass'''
arr.reverse()
for i in arr:
_ba.chatmessage(i)
commandSuccess = True
#GET PROFILES
elif m == '/gp':
try:
try:
playerID = int(a[0])
except:
playerID = playerIdFromNick(a[0])
num = 1
for i in splayers[playerID].inputdevice.get_player_profiles():
try:
_ba.chatmessage(f"{num}) - {i}")
num += 1
except:
pass
commandSuccess = True
except:
ba.screenmessage(f"Using: /gp number of list", transient=True, clients=[clientID])
#WHO IS
elif m == '/whois':
try:
#clID = int(a[0])
try:
clID = int(a[0])
except:
clID = clientIdFromNick(str(a[0]))
ID = ''
for i in splayers:
if i.inputdevice.client_id == clID:
ID = i.get_account_id()
name = i.getname()
if (ID != '') and (ID is not None) and (ID != 'null'):
with open(playerLogFile,'r') as f:
allPlayers = json.loads(f.read())
allID = allPlayers[ID]
string = f'Login ID of {name} is:'
for i in allID:
#_ba.chatmessage(i)
if (i != ID): string += '\n' + i
ba.screenmessage(string, transient=True, color=(1, 1, 1))
commandSuccess = True
except:
ba.screenmessage(f"Using: /whois [ClientID or Name]", clients=[clientID], transient=True)
#MUTE
elif m == '/mute':
import chatFilter
try:
try:
clID = int(a[0])
except:
clID = clientIdFromNick(str(a[0]))
ID = None
for i in _ba.get_game_roster():
if i['clientID'] == clID:
ID = i['account_id']
name = i['display_string']
if (ID not in [None, 'null']):
try:
chatFilter.chatCoolDown[ID] = a[1] * 60
sendError(f'{name} muted for {str(a[1])} minutes.')
commandSuccess = True
except:
chatFilter.chatCoolDown[ID] = 99999 * 60
sendError(f'{name} muted until server restarts.')
commandSuccess = True
else:
sendError(f"{name} is already muted", clientID)
except:
ba.screenmessage(f"Usage: /mute <ClientId/Name> <Minutes>", clients=[clientID], transient=True)
#UN MUTE
elif m == '/unmute':
import chatFilter
try:
try:
clID = int(a[0])
except:
clID = clientIdFromNick(str(a[0]))
ID = None
for i in _ba.get_game_roster():
if i['clientID'] == clID:
ID = i['account_id']
name = i['display_string']
if (ID not in [None, 'null']) and (ID in chatFilter.chatCoolDown) and (chatFilter.chatCoolDown[ID] > 3):
chatFilter.chatCoolDown.pop(ID)
_ba.chatmessage(f'Unmuted {name}')
commandSuccess = True
else:
sendError(f"{name} is not muted yet", clientID)
except:
ba.screenmessage(f"Usage: /unmute <ClientId/Name>", clients=[clientID], transient=True)
#KICK
elif m == '/kick':
if a == []:
ba.screenmessage(f"Using: /kick [name/ClientID]", clients=[clientID], transient=True)
else:
if len(a[0]) > 3:
self.kickByNick(a[0])
else:
try:
s = int(a[0])
_ba.disconnect_client(int(a[0]))
except:
self.kickByNick(a[0])
commandSuccess=True
#KICK
elif m == '/kickall':
try:
for i in ros:
if i['client_id'] != clientID:
_ba.disconnect_client(i['client_id'])
commandSuccess=True
except:
pass
#REMOVE
elif m == '/remove':
if a == [] or a[0] == 'all':
ba.screenmessage(f"Using: /removeall [or] /remove [PlayerID]", transient=True, clients=[clientID])
else:
ba.getactivity().remove_player(splayers[int(a[0])])
commandSuccess=True
#REMOVEALL
elif m == '/removeall':
for i in splayers:
try:
ba.getactivity().remove_player(i)
except:
pass
commandSuccess=True
#SHATTER
elif m == '/shatter':
if a == [] or a[0] == 'all':
ba.screenmessage(f"Using: /shatterall [or] /shatter [PlayerID]", transient=True, clients=[clientID])
else:
players[int(a[0])].actor.node.shattered = int(a[1])
commandSuccess=True
#SHATTERALL
elif m == '/shatterall':
for i in players:
i.actor.node.shattered = int(a[1])
commandSuccess=True
#QUIT
elif m in ('/quit', '/restart', '/restartserver'):
_ba.chatmessage("Server Restarting, Please Join in a moment !")
commandSuccess=True
_ba.quit()
#AC
elif m == '/ac':
try:
if a[0] == 'r':
m = 1.3 if a[1] is None else float(a[1])
s = 1000 if a[2] is None else float(a[2])
ba.animate_array(activity.globalsnode, 'ambient_color',3, {0: (1*m,0,0), s: (0,1*m,0),s*2:(0,0,1*m),s*3:(1*m,0,0)},True)
commandSuccess=True
else:
try:
if a[1] is not None:
activity.globalsnode.ambient_color = (float(a[0]),float(a[1]),float(a[2]))
commandSuccess=True
except:
pass
except:
ba.screenmessage(f"Using: '/ac [Red] [Green] [Blue]' or '/ac r [brightness] [speed]'", transient=True, clients=[clientID])
#TINT
elif m == '/tint':
try:
if a[0] == 'r':
m = 1.3 if a[1] is None else float(a[1])
s = 1000 if a[2] is None else float(a[2])
ba.animate_array(activity.globalsnode, 'tint',3, {0: (1*m,0,0), s: (0,1*m,0),s*2:(0,0,1*m),s*3:(1*m,0,0)},True)
commandSuccess=True
else:
if a[1] is not None:
activity.globalsnode.tint = (float(a[0]),float(a[1]),float(a[2]))
commandSuccess=True
else:
pass
except:
ba.screenmessage(f"Using: '/tint [Red] [Green] [Blue]' or '/tint r [brightness] [speed]'", transient=True, clients=[clientID])
#REFLECTIONS
elif m.startswith('/reflectionNotAvail'):
if a == [] or len(a) < 2:
ba.screenmessage(f"Using: /reflections [type(1/0)] [scale]", transient=True, clients=[clientID])
rs = [int(a[1])]
type = 'soft' if int(a[0]) == 0 else 'powerup'
try:
_ba.get_foreground_host_activity().getMap().node.reflection = type
_ba.get_foreground_host_activity().getMap().node.reflectionScale = rs
except:
pass
try:
_ba.get_foreground_host_activity().getMap().bg.reflection = type
_ba.get_foreground_host_activity().getMap().bg.reflectionScale = rs
except:
pass
try:
_ba.get_foreground_host_activity().getMap().floor.reflection = type
_ba.get_foreground_host_activity().getMap().floor.reflectionScale = rs
except:
pass
try:
_ba.get_foreground_host_activity().getMap().center.reflection = type
_ba.get_foreground_host_activity().getMap().center.reflectionScale = rs
except:
pass
commandSuccess=True
#FLOOR REFLECTION
elif m.startswith('/floorreflectionNotAvail'):
bs.getSharedObject('globals').floorReflection = bs.getSharedObject('globals').floorReflection == False
commandSuccess=True
#ICY or EXCHANGE
elif m in ('/exchange','/icy'):
try:
if True:
try:
player1 = int(a[0])
except:
player1 = playerIdFromNick(a[0])
try:
player2 = int(a[1])
except:
player2 = playerIdFromNick(a[1])
node1 = players[player1].actor.node
node2 = players[player2].actor.node
players[player1].actor.node = node2
players[player2].actor.node = node1
commandSuccess = True
except:
ba.screenmessage(f"Using: /exchange [PlayerID1] [PlayerID2]", transient=True, clients=[clientID])
#ICEOFF or HOCKEY
elif m in ('/hockey','/iceoff'):
try:
activity.getMap().isHockey = activity.getMap().isHockey == False
except:
pass
for i in players:
i.actor.node.hockey = i.actor.node.hockey == False
commandSuccess = True
#VIP
elif m == '/vip':
try:
clID = int(a[0])
updated = roles.vips
ros = _ba.get_game_roster()
for i in ros:
if (i is not None) and (i != {}) and (i['client_id'] == clID):
name = i['players'][0]['name']
newID = i['account_id']
if a[1] == 'add':
if newID not in updated:
roles.vips.append(newID)
commandSuccess=True
else: sendError(f"{str(name)}, is already a vip !",clientID)
elif a[1] == 'remove':
if newID in updated:
roles.vips.remove(newID)
commandSuccess=True
else: sendError(f"{str(name)}, is already not a vip !",clientID)
updated = roles.vips
if (len(a) > 2) and (uniqueID in roles.owners) and commandSuccess:
if str(a[2]).startswith('perm'):
#Add them to members.json (log)
m = open(membersFile, 'r')
d = json.loads(m)
if (newID not in d['vips']): d['vips'][newID] = []
if (name not in d['vips'][newID]): d['vips'][newID].append(name)
m2 = open(membersFile, 'w')
m2.write(json.dumps(d, indent=4))
m2.close()
#Add them to roles.py
with open(python_path + '/roles.py') as (file):
s = [ row for row in file ]
s[8] = 'vips = ' + str(updated) + '\n'
f = open(python_path + '/roles.py', 'w')
for i in s:
f.write(i)
f.close()
except:
ba.screenmessage(f"Using: /vip [ClientID] add/remove perm/None", clients=[clientID], transient=True)
#MAXPLAYERS
elif m.startswith('/maxplayer'):
if a == []:
ba.screenmessage(f"Using: /maxplayers [count]", clients=[clientID], transient=True)
else:
try:
_ba.get_foreground_host_().max_players = int(a[0])
_ba.set_public_party_max_size(int(a[0]))
_ba.chatmessage(f"MaxPlayers limit set to {str(int(a[0]))}")
commandSuccess=True
except:
pass
#SAY (Send Chat Message in Server's name)
elif m == "/say":
if a == []:
ba.screenmessage('Usage: /say <text to send>', transient=True, clients=[clientID])
else:
message = " ".join(a)
_ba.chatmessage(message)
#################### OWNER COMMANDS ########################
#KICK VOTE
elif m == '/kickvote':
try:
if a[0] in ('enable','yes','true'): _ba.set_enable_default_kick_voting(True)
if a[0] in ('disable','no','false'): _ba.set_enable_default_kick_voting(False)
commandSuccess = True
except:
ba.screenmessage(f"Using: /kickvote [enable/yes/true or disable/no/false]", clients=[clientID], transient=True)
#TOP
elif m == '/top':
try:
temp_limit = int(a[0])
temp_toppers = []
f = open(statsFile, 'r')
temp_stats = json.loads(f.read())
for i in range(1,limit+1):
for id in temp_stats:
if int(temp_stats[id]['rank'])==i: temp_toppers.append(id)
if temp_toppers != []:
for account_id in temp_toppers:
temp_name = temp_stats[account_id]['name_html']
#print(temp_toppers.index(account_id)+1,temp_name[temp_name.find('>')+1:].encode('utf8'),temp_stats[account_id]['scores'])
_ba.chatmessage("{0}. {1} -----> {2}".format(temp_toppers.index(account_id)+1,temp_name[temp_name.find('>')+1:].encode('utf8'),temp_stats[account_id]['scores']))
commandSuccess=True
f.close()
except:
sendError('Usage: /top <range>',clientID)
#SETSCORE
elif m in ['/setscore','/reset']:
try:
temp_rank = int(a[0])
temp_stats = getStats()
for id in temp_stats:
if int(temp_stats[id]['rank']) == temp_rank: ID = id
f.close()
temp_name = temp_stats[ID]['name_html']
temp_name = temp_name[temp_name.find('>')+1:].encode('utf-8')
try:
temp_score = int(a[1])
except:
temp_score = 0
stats[ID]['score'] = temp_score
_ba.chatmessage("{}'s score set to {}".format(temp_name,temp_score))
#backup
from shutil import copyfile
src = statsFile
from datetime import datetime
now = datetime.now().strftime('%d-%m %H:%M:%S')
dst = 'stats.bak---' + now
copyfile(src,dst)
#write new stats
f = open(statsFile, 'w')
f.write(json.dumps(temp_stats))
f.close()
'''
import mystats
mystats.refreshStats()
'''
commandSuccess=True
except:
sendError('Usage: /reset <rank of player> (optional:newScore)',clientID)
#WARN
elif m == "/warn":
try:
try:
clID = int(a[0])
except:
clID = clientIdFromNick(str(a[0]))
for i in _ba.get_game_roster():
if i['clientID'] == clID:
ID = i['displayString']
name = ID
try:
name = i['players'][0]['name']
except:
pass
import chatFilter
warnCount = chatFilter.warn(ID)
if warnCount < 3:
bsInternal._chatMessage("Warning {str(name)}.")
for i in range(3):
sendError('Warning!!!!',clID)
sendError("Warn count: % 1d/3"%(warnCount),clID)
else:
chatFilter.abusers.pop(ID)
_ba.chatmessage(f"Warn limit exceeded. Kicking {str(name)}.")
_ba.chatmessage("Warn system Made By Aleena")
_ba.chatmessage(clID)
commandSuccess = True
except:
ba.screenmessage('Usage: /warn <client_id or name>', transient=True, clients=[clientID])
#CLEAR WARN
elif m.startswith("/clearwarn"):
import chatFilter
try:
try:
clID = int(a[0])
except:
clID = clientIdFromNick(str(a[0]))
ID = None
for i in _ba.get_game_roster():
if i['clientID'] == clID:
ID = i['account_id']
name = i['display_string']
chatFilter.abusers.pop(ID)
_ba.chatmessage(f"{name} has been removed from Abuse/Warn List")
commandSuccess = True
except:
ba.screenmessage('Usage: /clearwarn <client_id or name>', transient=True, clients=[clientID])
#WHOINQUEUE
elif m == '/whoinqueue':
                        def _onQueueQueryResult(result) -> None:
                            # Callback for the PARTY_QUEUE_QUERY transaction: announce
                            # in chat who is currently waiting in this party's queue.
                            from queueChecker import queueID
                            #print result, ' is result'
                            # result['e'] holds the queue entries; presumably each entry
                            # is a sequence whose index 3 is the player's display name
                            # -- TODO confirm against the server API.
                            inQueue = result['e']
                            #print inQueue, ' is inQueue'
                            string = 'No one '
                            if inQueue != []:
                                string = ''
                                for queue in inQueue:
                                    #print queue[3]
                                    string += queue[3] + ' '
                            _ba.chatmessage(f"{string} is in the queue")
_ba.add_transaction(
{'type': 'PARTY_QUEUE_QUERY', 'q': queueID},callback=ba.Call(_onQueueQueryResult))
_ba.run_transactions()
commandSuccess=True
#TEXT
elif m in ('/text', '/texts'):
from BsTextOnMap import texts
if a == []:
ba.screenmessage(f"Usage: /text showall or /text add [text] or /text del [textnumber]", clients=[clientID], transient=True)
elif a[0] == 'add' and len(a)>1:
#get whole sentence from argument list
newText = u''
for i in range(1,len(a)):
newText += a[i] + ' '
#print newText
texts.append(newText)
#write to file
with open(python_path + '/BsTextOnMap.py') as (file):
s = [ row for row in file ]
s[0] = 'texts = ' + str(texts) + '\n'
f = open(python_path + '/BsTextOnMap.py', 'w')
for i in s:
f.write(i)
f.close()
commandSuccess=True
elif a[0] == 'showall':
for i in range(len(texts)):
#print texts(i)
_ba.chatmessage(str(i) + '. ' + texts[i])
commandSuccess=True
elif a[0] == 'del' and len(a)>1:
try:
if len(texts) > 1:
texts.pop(int(a[1]))
#write to file
with open(python_path + '/BsTextOnMap.py') as (file):
s = [ row for row in file ]
s[0] = 'texts = ' + str(texts) + '\n'
f = open(python_path + '/BsTextOnMap.py', 'w')
for i in s:
f.write(i)
f.close()
commandSuccess=True
else:
sendError(f"At least one text should be present",clientID)
except:
pass
else:
ba.screenmessage(f"Usage: /text showall or /text add [text] or /text del [textnumber]", clients=[clientID], transient=True)
#ADMIN
elif m == '/admin':
if True: #try:
clID = int(a[0])
updated = roles.admins
ros = _ba.get_game_roster()
for i in ros:
if (i is not None) and (i != {}) and (i['client_id'] == clID):
name = i['players'][0]['name']
newID = i['account_id']
if a[1] == 'add':
if newID not in updated:
roles.admins.append(newID)
commandSuccess=True
else: sendError(f"{str(name)}, is already an admin !",clientID)
elif a[1] == 'remove':
if newID in updated:
roles.admins.remove(newID)
commandSuccess=True
else: sendError(f"{str(name)}, is already not an admin !",clientID)
updated = roles.admins
if (len(a) > 2) and (uniqueID in roles.owners) and commandSuccess:
if str(a[2]).startswith('perm'):
#Add them to members.json (log)
m = open(membersFile, 'r')
d = json.loads(m)
if (newID not in d['admins']): d['admins'][newID] = []
if (name not in d['admins'][newID]): d['admins'][newID].append(name)
m2 = open(membersFile, 'w')
m2.write(json.dumps(d, indent=4))
m2.close()
#Add them to roles.py
with open(python_path + '/roles.py') as (file):
s = [ row for row in file ]
s[9] = 'admins = ' + str(updated) + '\n'
f = open(python_path + '/roles.py', 'w')
for i in s:
f.write(i)
f.close()
'''except:
ba.screenmessage(f"Using: /admin [ClientID] add/remove perm/None", clients=[clientID], transient=True)'''
#BAN
elif m == '/ban':
try:
clID = int(a[0])
updated = roles.banList
ros = _ba.get_game_roster()
for i in ros:
if (i is not None) and (i != {}) and (i['client_id'] == clID):
name = i['players'][0]['name']
new = i['account_id']
if new not in roles.banList:
if len(a) > 1: roles.banList[new] = [i['display_string'], str(a[1])] #Add Name If Provided
else: roles.banList[new] = [i['display_string']]
updated = roles.banList
commandSuccess=True
_ba.chatmessage(f"{str(name)}, has been BANNED !")
_ba.disconnect_client(clID)
else: sendError(f"{str(name)}, is already BANNED !",clientID)
if not commandSuccess: return
m = open(membersFile, 'r')
d = json.loads(m)
if (newID not in d['banList']): d['banList'][newID] = []
if (name not in d['banList'][newID]): d['banList'][newID].append(name)
m2 = open(membersFile, 'w')
m2.write(json.dumps(d, indent=4))
m2.close()
with open(python_path + '/roles.py') as (file):
s = [ row for row in file ]
s[2] = 'banList = ' + str(updated) + '\n'
f = open(python_path + '/roles.py', 'w')
for i in s:
f.write(i)
f.close()
except:
ba.screenmessage(f"Using: /ban ClientID (optional-NickNameForIdentification)", clients=[clientID], transient=True)
#SPECIAL
elif m == '/special':
try:
clID = int(a[0])
updated = roles.special
ros = _ba.get_game_roster()
cmds = a[2:]
for i in ros:
if (i is not None) and (i != {}) and (i['client_id'] == clID):
name = i['players'][0]['name']
newID = i['account_id']
success = False
if a[1] == 'add':
if newID not in updated:
roles.special[newID] = cmds
commandSuccess=True
else:
for cmd in cmds:
if (cmd.startswith('/')) and (cmd not in roles.special[newID]):
roles.special[newID].append(cmd)
success = True
else: sendError(f"{str(name)} already has perms to '{cmd}' !\n (Note: cmd should start with '/')",clientID)
commandSuccess=True
if success: _ba.chatmessage(f"Now {str(name)} can use {str(cmds)}...")
elif a[1] == 'remove':
if (len(a) > 2) and (newID in updated):
for cmd in cmds:
if (cmd.startswith('/')) and (cmd not in roles.special[newID]):
roles.special[newID].remove(cmd)
success = True
else: sendError(f"{str(name)} has no perms to '{cmd}' for you to remove again !\n (Note: cmd should start with '/')",clientID)
commandSuccess=True
if success: _ba.chatmessage(f"Now {str(name)} can't use {str(cmds)}...")
if (len(a) < 3) and (newID in updated):
roles.special.pop(newID)
commandSuccess=True
else: sendError(f"{str(name)} already don't have special perms !",clientID)
updated = roles.special
if (len(a) > 2) and (uniqueID in roles.owners):
if commandSuccess:
#Add them to members.json (log)
m = open(membersFile, 'r')
d = json.loads(m)
if (newID not in d['special']): d['special'][newID] = []
if (name not in d['special'][newID]): d['special'][newID].append(name)
m2 = open(membersFile, 'w')
m2.write(json.dumps(d, indent=4))
m2.close()
#Add them to roles.py
with open(python_path + '/roles.py') as (file):
s = [ row for row in file ]
s[10] = 'special = ' + str(updated) + '\n'
f = open(python_path + '/roles.py', 'w')
for i in s:
f.write(i)
f.close()
except:
ba.screenmessage(f"Using: /special [ClientID] add/remove Cmds", clients=[clientID], transient=True)
#PARTYNAME
elif m == '/partyname':
if a == []:
ba.screenmessage(f"Usage: /partyname Name of party", clients=[clientID], transient=True)
else:
name = ''
for word in a:
name += word + ' '
try:
_ba.set_public_party_name(name)
ba.screenmessage(f"Party name changed to '{name}'.")
mysettings.server_name = name
commandSuccess=True
except:
sendError("failed to change party's name")
#PARTY
elif m == '/party':
if a == []:
ba.screenmessage(f"Usage: /party 0(pvt) or 1(pub)", clients=[clientID], transient=True)
elif (a[0] == '0') or (a[0].startswith('Pri')) or (a[0] == 'Pvt'):
try:
_ba.set_public_party_enabled(False)
_ba.chatmessage('Party is Private...')
commandSuccess=True
except:
sendError('failed to change',clientID)
elif a[0] == '1' or (a[0].startswith('Pub')):
try:
_ba.set_public_party_enabled(True)
_ba.chatmessage('Party is Public...')
commandSuccess=True
except:
sendError('failed to change',clientID)
else:
ba.screenmessage(f"Usage: /party 0(pvt) or 1(pub)", clients=[clientID], transient=True)
#SET SCREEN TEXT COLOR
elif m in ('/setscreentextcolor', '/settextcolor', '/setscreencolor'):
try:
if len(a) > 1: screenTextColor = (int(a[0]), int(a[1]), int(a[2]))
if (len(a) == 1) and (isinstance(a[0], int)): screenTextColor = tuple(a[0])
commandSuccess = True
except:
ba.screenmessage('Usage: /setscreentextcolor R G B', transient=True, clients=[clientID])
#WL
elif m == '/wl': #whiteListMode
try:
wlm = settings['whiteListMode']
if len(a) < 2:
if a[0].lower() in ('no', 'off', 'disable'):
if wlm :
wlm = False
ba.screenmessage("Server WhiteList Mode disabled for 30 seconds\n if want to disable permanently, use\n '/settings whiteListMode disable'", color=(1,0,0), clients=[clientID], transient=True)
ba.Timer(30, ba.Call(enable_back), timetype=ba.TimeType.REAL)
else: ba.screenmessage("Wait what, why u wanna disable a thing\n which is already disabled..?", color=(1,0,0), clients=[clientID], transient=True)
if a[0].lower() in ('yes', 'on', 'enable'):
ba.screenmessage("Use '/settings whiteListMode enable' instead of this cmd!", color=(1,0,0), clients=[clientID], transient=True)
else:
clID = int(a[1])
#refresh/update jsons
m = open(membersFile, 'r')
org_mem = json.loads(m)
org_mem['serverWhiteList'] = roles.serverWhiteList
m2 = open(membersFile, 'w')
m2.write(json.dumps(org_mem, indent=4))
m2.close()
updated = roles.serverWhiteList
ros = _ba.get_game_roster()
for i in ros:
if (i is not None) and (i != {}) and (i['client_id'] == clID):
name = i['players'][0]['display_string']
newID = i['account_id']
success = False
if a[1] == 'add':
if newID not in updated:
roles.serverWhiteList[newID] = name
commandSuccess=True
else: sendError(f"{str(name)}, is already in serverWhiteList!",clientID)
elif a[1] == 'remove':
if newID in updated:
roles.serverWhiteList.pop(newID)
commandSuccess=True
else: sendError(f"{str(name)} already not in serverWhiteList!",clientID)
updated = roles.serverWhiteList
if (len(a) > 2) and (uniqueID in roles.owners):
if commandSuccess:
#Add them to members.json (log)
m = open(membersFile, 'r')
d = json.loads(m)
d['serverWhiteList'] = updated
m2 = open(membersFile, 'w')
m2.write(json.dumps(d, indent=4))
m2.close()
#Add them to roles.py
with open(python_path + '/roles.py') as (file):
s = [ row for row in file ]
s[3] = 'serverWhiteList = ' + str(updated) + '\n'
f = open(python_path + '/roles.py', 'w')
for i in s:
f.write(i)
f.close()
def enable_back():
wlm = True
except:
ba.screenmessage(f"Using: /wl [ClientID] add/remove", clients=[clientID], transient=True)
#CHAT COOL DOWN
elif m == '/cd':
try:
if a[0].lower() in ('no', 'off', 'disable'):
if chatCoolDownTime:
chatCoolDownTime = False
#commandSuccess = True #This line maybe used by you in other commands
else: ba.screenmessage("Wait what, why u wanna disable a thing\n which is already disabled..?", color=(1,0,0), clients=[clientID], transient=True)
else:
try:
if int(a[0].lower()) in range(300):
chatCoolDownTime = int(a[0])
_ba.chatmessage("Successfully set chatCoolDown time to {} seconds :)".format(str(a[0])))
#commandSuccess = True #This line maybe used by you in other commands
else: ba.screenmessage("Oof... 300 seconds is maximum cooldown, Why this much?", color=(1,1,1), clients=[clientID], transient=True)
except:
ba.screenmessage("Give an Integer as arg... you can't trick me\n Usage: '/cd CD_Time_In_Integer'", color=(1,0,0), clients=[clientID], transient=True)
except:
ba.screenmessage("Usage:\n'/cd disable/off/no' [or] '/cd CD_Time_In_Integer' for enabling...", color=(1,0,0), clients=[clientID], transient=True)
#PAUSE
elif m == '/pause':
activity.globalsnode.paused = activity.globalsnode.paused == False
commandSuccess=True
#SETTINGS
elif m in ('/settings', '/set', '/setting'):
try:
success = False
enables = ('yes', 'on', 'enable')
disables = ('no', 'off', 'disable')
_set_ = a[0]
if _set_.lower() in ('powerups', 'p', 'pups'):
if len(a) <= 2:
sendError(f"Invalid key !, Try checking by '/help settings'",clientID)
else:
_set = str(a[1])
if _set in powerups:
if str(a[2]).lower() in enables:
if powerups[_set] != True:
powerups[_set] = True
commandSuccess=True
else: sendError(f"This Setting is already enabled !",clientID)
if str(a[2]).lower() in disables:
if powerups[_set] != False:
powerups[_set] = False
commandSuccess=True
else: sendError(f"This Setting is already disabled !",clientID)
else: sendError(f"Invalid key !, Try checking by '/help settings'",clientID)
else:
_set = _set_
if _set in settings:
if str(a[1]).lower() in enables:
if settings[_set] != True:
settings[_set] = True
commandSuccess=True
else: sendError(f"This Setting is already enabled !",clientID)
if str(a[1]).lower() in disables:
if settings[_set] != False:
settings[_set] = False
commandSuccess=True
else: sendError(f"This Setting is already disabled !",clientID)
else: sendError(f"Invalid key !, Try checking by '/help settings'",clientID)
with open(python_path + '/administrator_setup.py') as (file):
s = [ row for row in file ]
s[0] = 'settings = ' + str(settings) + '\n'
s[1] = 'powerups = ' + str(powerups) + '\n'
f = open(python_path + '/settings.py', 'w')
for i in s:
f.write(i)
f.close()
except:
ba.screenmessage(f"Using: /settings [setting] [subSetting(optional)] enable/disable", clients=[clientID], transient=True)
else:
pass
return [commandSuccess,str(client_str),str(uniqueID),costOfCommand]
# Singleton command dispatcher; chatOptions.opt() parses and executes a chat command.
c = chatOptions()
def cmd(clientID: int, msg: str) -> None:
    """Run a chat command for a client, announce the result, and log it.

    clientID -- client id of the sender.
    msg -- the raw chat message (e.g. "/party 1").
    """
    if settings['enableCoinSystem']:
        # NOTE(review): datetime is only imported on this branch but is used
        # unconditionally below -- if enableCoinSystem is False this raises
        # NameError at the logging step. Confirm and hoist the import.
        import coinSystem, datetime
    # Only run commands while a game activity is in the foreground.
    if _ba.get_foreground_host_activity() is not None:
        # cs = [commandSuccess, client_str, uniqueID, costOfCommand]
        # (return shape established by chatOptions.opt above).
        cs = c.opt(msg, clientID)
        if cs[0]:
            #send the Command Message
            cmdMsg = f"{cs[1]} - {msg}"
            # NOTE(review): the message is shown when hideCmds is True, which
            # looks inverted -- presumably it should show when NOT hidden.
            if settings['hideCmds']: ba.screenmessage(cmdMsg,color=(0,0,1))
            if commandByCoin:
                # Charge the sender: costOfCommand is deducted as negative coins.
                coinSystem.addCoins(cs[2], cs[3] * -1)
                _ba.chatmessage(f"Success! That cost you {tic}{str(cs[3])}")
            else:
                with ba.Context(_ba.get_foreground_host_activity()):
                    # NOTE(review): `reply` is not defined anywhere in this
                    # function -- likely a stale module global; verify.
                    ba.screenmessage(reply, color=(0.1,1,0.1))
                    #_ba.chatmessage(reply)
            #Update Logs...
            # Prepend the newest entry so the log file is newest-first.
            now = datetime.datetime.now().strftime("%H:%M:%S - %d %b %y")
            logMsg = f"{str(now)} ||{cs[2]}|| {cmdMsg} \n"
            log_list = get_cmd_logs_as_list()
            log_list.insert(0, logMsg)
            with open(cmdLogFile, 'w') as f:
                for i in log_list:
                    f.write(i)
            # Redundant: the `with` block already closed the file.
            f.close()
            return
| 55.100756
| 290
| 0.376754
|
acff2549ef9e1ee8f3eed300bc87c26d73ba5613
| 12,724
|
py
|
Python
|
luseesky/utils/parse_fits.py
|
christianhbye/lusee_sky_simulations
|
608fb9486affcc56b894b43ae029df9ba4a324ee
|
[
"MIT"
] | null | null | null |
luseesky/utils/parse_fits.py
|
christianhbye/lusee_sky_simulations
|
608fb9486affcc56b894b43ae029df9ba4a324ee
|
[
"MIT"
] | 10
|
2022-02-14T07:40:02.000Z
|
2022-03-02T05:12:41.000Z
|
luseesky/utils/parse_fits.py
|
christianhbye/lusee_sky_simulations
|
608fb9486affcc56b894b43ae029df9ba4a324ee
|
[
"MIT"
] | null | null | null |
from astropy.io import fits # type: ignore
from dataclasses import dataclass, field
import matplotlib.pyplot as plt # type: ignore
import numpy as np
from pathlib import Path
from pyuvdata import uvbeam # type: ignore
from typing import Any, Optional
import warnings
from .coordinates import sph2cart, cart2sph
def mk_linspace(low: float, high: float, step: Any = 1) -> np.ndarray:
    """
    Make a linspace given low, high, step. This avoids the stability
    issues in np.arange(low, high, step) when low >> step (see numpy doc).

    Both endpoints are always included; if ``high`` is not a whole number
    of steps away from ``low``, a UserWarning is raised and the effective
    step size is adjusted.
    """
    span = high - low
    # Integer vs exact step count differ when the span is not a multiple
    # of step (within a 1e-4 tolerance to absorb float rounding).
    if not np.isclose(span // step, span / step, atol=1e-4):
        # Fix: build the message with implicit string concatenation. The
        # original used a backslash line-continuation *inside* the literal,
        # which embedded a raw run of indentation spaces in the warning text.
        warnings.warn(
            "'high' is not a multiple of 'step' away from 'low', "
            "'step' will be changed.",
            UserWarning,
        )
    # floor(span / step) + 1 points keeps both endpoints in the output.
    num = int(span // step + 1)
    return np.linspace(low, high, num=num)
def Efield_to_power(efield: np.ndarray, axis: int = 3) -> np.ndarray:
    """Collapse the component axis of a complex E-field into a power beam.

    axis: the axis holding the x,y,z-components; it is summed over.
    Returns the beam in V (sqrt of the summed squared magnitudes).
    """
    magnitude_sq = np.abs(efield) ** 2
    return np.sqrt(magnitude_sq.sum(axis=axis))
@dataclass
class Beam:
    """Simulated antenna E-field beam loaded from a FITS file.

    The primary HDU holds the real part and the second HDU the imaginary
    part of the E-field, with the frequency/theta/phi axis grids encoded
    as start/end/step keywords in the primary header. Provides plotting,
    cartesian/spherical basis conversion, and export to a pyuvdata UVBeam.
    """
    # Path of the FITS file to load.
    fname: str
    # Basis of the E-field component axis: "cartesian" (x, y, z) or
    # "sphericals"; toggled by to_sphericals()/to_cartesian().
    beam_coords: str = "cartesian"
    # Complex E-field in V; populated in __post_init__.
    E_field: np.ndarray = field(init=False)
    # Power beam |E| in V, shape (freq, theta, phi); populated in __post_init__.
    power: np.ndarray = field(init=False)
    # Frequency axis in MHz; populated in __post_init__.
    frequencies: np.ndarray = field(init=False)
    # Theta axis in degrees; populated in __post_init__.
    theta: np.ndarray = field(init=False)
    # Phi axis in degrees; populated in __post_init__.
    phi: np.ndarray = field(init=False)
    def __post_init__(self):
        """Read the FITS file and fill E_field, power and the axis grids."""
        simfits = fits.open(self.fname)
        header = simfits[0].header
        # HDU 0 = real part, HDU 1 = imaginary part.
        self.E_field = simfits[0].data + 1j * simfits[1].data  # nu, th, ph
        simfits.close()
        self.E_field /= 1e3  # convert mV to V
        self.frequencies = mk_linspace(
            header["freq_start"], header["freq_end"], step=header["freq_step"]
        )  # in MHz
        self.theta = mk_linspace(
            header["theta_start"],
            header["theta_end"],
            step=header["theta_step"],
        )
        self.phi = mk_linspace(
            header["phi_start"], header["phi_end"], step=header["phi_step"]
        )
        # If phi wraps a full circle the first and last columns duplicate
        # each other; keep each azimuth unique by dropping the last column.
        if np.allclose(self.E_field[:, :, 0], self.E_field[:, :, -1]):
            assert np.isclose(self.phi[-1] - self.phi[0], 360)
            self.E_field = self.E_field[:, :, :-1]  # drop phi = 360 deg
            self.phi = self.phi[:-1]
        # Components assumed on axis 3 -- TODO confirm for non-4D FITS data.
        self.power = Efield_to_power(self.E_field, axis=3)
    def plot_power(self, freq: float):
        """Plot the power beam over (theta, phi) at the frequency in MHz
        closest to ``freq``."""
        freq_idx = np.argmin(np.abs(self.frequencies - freq))
        plt.figure()
        plt.imshow(
            self.power[freq_idx],
            interpolation="none",
            aspect="auto",
            # theta max/min swapped so theta = 0 appears at the top.
            extent=[
                self.phi.min(),
                self.phi.max(),
                self.theta.max(),
                self.theta.min(),
            ],
        )
        plt.colorbar(label="Power [V]")
        plt.title(
            "Power at $\\nu={:.0f}$ MHz".format(self.frequencies[freq_idx])
        )
        plt.xlabel("$\\phi$ [deg]")
        plt.ylabel("$\\theta$ [deg]")
        plt.show()
    def plot_beamcuts(self, phi: float = 0):
        """Plot power vs (frequency, theta) at the azimuth in degrees
        closest to ``phi``."""
        phi_idx = np.argmin(np.abs(self.phi - phi))
        plt.figure()
        plt.imshow(
            self.power[:, :, phi_idx],
            interpolation="none",
            aspect="auto",
            extent=[
                self.theta.min(),
                self.theta.max(),
                self.frequencies.max(),
                self.frequencies.min(),
            ],
        )
        plt.title("Power at $\\phi={:.0f}$ deg".format(self.phi[phi_idx]))
        plt.ylabel("$\\nu$ [MHz]")
        plt.xlabel("$\\theta$ [deg]")
        plt.colorbar(label="Power [V]")
        plt.show()
    def to_sphericals(self):
        """Rotate E_field components to the spherical basis in place.

        Warns and does nothing if the beam is already spherical.
        """
        if self.beam_coords == "sphericals":
            warnings.warn(
                "E-field is already in spherical coordinates.", UserWarning
            )
        else:  # cartesian coordinates
            E_sph = np.empty_like(self.E_field)
            for i, th in enumerate(np.radians(self.theta)):
                for j, ph in enumerate(np.radians(self.phi)):
                    rot_matrix = cart2sph(th, ph)
                    # Apply the rotation to all frequencies at once.
                    E_sph[:, i, j] = np.einsum(
                        "ij,fj->fi", rot_matrix, self.E_field[:, i, j]
                    )
            self.E_field = E_sph
            self.beam_coords = "sphericals"
    def to_cartesian(self):
        """Rotate E_field components to the cartesian basis in place.

        Warns and does nothing if the beam is already cartesian.
        """
        if self.beam_coords == "cartesian":
            warnings.warn(
                "E-field is already in cartesian coordinates.", UserWarning
            )
        else:  # spherical coordinates
            E_cart = np.empty_like(self.E_field)
            for i, th in enumerate(np.radians(self.theta)):
                for j, ph in enumerate(np.radians(self.phi)):
                    rot_matrix = sph2cart(th, ph)
                    E_cart[:, i, j] = np.einsum(
                        "ij,fj->fi", rot_matrix, self.E_field[:, i, j]
                    )
            self.E_field = E_cart
            self.beam_coords = "cartesian"
    def _flatten(
        self,
        beam_type: str = "power",
        arr: Optional[np.ndarray] = None,
        return_th_ph=True,
    ) -> np.ndarray:
        """
        Convert array with the shape (freq_size, th_size, ph_size) to a
        2d-array of shape (freq_size, th_size*ph_size) where theta increases
        faster than phi

        If ``return_th_ph`` is true, also returns the matching flattened
        theta and phi coordinate arrays.
        """
        if arr is None:
            if beam_type == "power":
                arr = np.copy(self.power)
            else:
                raise ValueError("No default array to flatten for E-field.")
        # order="F": theta (first remaining axis) varies fastest.
        flat_beam = arr.reshape(
            self.frequencies.size, self.theta.size * self.phi.size, order="F"
        )
        flat_theta = np.tile(self.theta, self.phi.size)
        flat_phi = (
            np.tile(self.phi, self.theta.size)
            .reshape(self.phi.size, self.theta.size, order="F")
            .flatten(order="C")
        )
        # NOTE(review): flat_beam is 2D while flat_theta/flat_phi are 1D, so
        # np.array([...]) builds a ragged (object-like) array -- confirm this
        # is intended and still supported by the numpy version in use.
        if return_th_ph:
            return np.array([flat_beam, flat_theta, flat_phi])
        else:
            return flat_beam
    def _write_txt_power(self, path: str = ".", verbose: bool = False) -> str:
        """Write one text file per frequency for the power beam into a new
        ``path``/tmp directory and return that directory's path."""
        beam2d, th2d, ph2d = self._flatten()
        savepath = path + "/tmp"
        # mkdir without exist_ok: fails loudly if a stale tmp dir remains.
        Path(savepath).mkdir()
        if verbose:
            print(f"Saving {len(beam2d)} files to {savepath}")
        for i, freq in enumerate(self.frequencies):
            np.savetxt(
                savepath + f"/{freq}.txt",
                np.column_stack((th2d, ph2d, beam2d[i])),
                header="Theta [deg] Phi [deg] Abs(V) [V] \n\n",
                comments="",
            )
        return savepath
    def _write_txt_Efield(
        self, pol: str = "x", path: str = ".", verbose: bool = False
    ) -> str:
        """
        Save Efield beams in txt file format readable by UVBeam.

        Writes one file per frequency into ``path``/tmp and returns that
        directory's path. NOTE(review): the ``pol`` argument is currently
        unused (the selection code below is commented out) -- confirm.
        """
        # get x-pol or y-pol. Easiest to convert to cartesian first:
        # if self.beam_coords == "sphericals":
        #     self.to_cartesian()
        # if pol == "x":  # XXX: not optimal, should be done on a copy
        #     self.E_field[:, :, :, 1:] = 0  # set Ey and Ez to 0 for x pol
        # elif pol == "y":
        #     self.E_field[:, :, :, 0] = 0  # Ex = 0
        #     self.E_field[:, :, :, 2] = 0  # Ez = 0
        # else:
        #     raise ValueError("pol must be 'x' or 'y'")
        self.to_sphericals()
        # Components 1 and 2 of the spherical basis are E_theta and E_phi.
        E_theta = self.E_field[:, :, :, 1]
        E_phi = self.E_field[:, :, :, 2]
        theta_mag = np.abs(E_theta)
        theta_phase = np.degrees(np.angle(E_theta))
        # Map phases from (-180, 180] into [0, 360).
        theta_phase = np.where(theta_phase < 0, theta_phase + 360, theta_phase)
        phi_mag = np.abs(E_phi)
        phi_phase = np.degrees(np.angle(E_phi))
        phi_phase = np.where(phi_phase < 0, phi_phase + 360, phi_phase)
        ax_ratio_sq = (
            theta_mag**2 + phi_mag**2 + np.abs(E_theta**2 + E_phi**2)
        )
        ax_ratio_sq /= (
            theta_mag**2 + phi_mag**2 - np.abs(E_theta**2 + E_phi**2)
        )
        ax_ratio = np.sqrt(ax_ratio_sq)
        theta_mag, theta, phi = self._flatten(
            beam_type="E_field", arr=theta_mag
        )
        theta_phase = self._flatten(
            beam_type="E_field", arr=theta_phase, return_th_ph=False
        )
        phi_mag = self._flatten(
            beam_type="E_field", arr=phi_mag, return_th_ph=False
        )
        phi_phase = self._flatten(
            beam_type="E_field", arr=phi_phase, return_th_ph=False
        )
        ax_ratio = self._flatten(
            beam_type="E_field", arr=ax_ratio, return_th_ph=False
        )
        # Combined magnitude of the two components, accounting for their
        # relative phase delta.
        delta = np.radians(theta_phase - phi_phase)
        E_mag = np.sqrt(
            theta_mag**2
            + phi_mag**2
            + 2 * theta_mag * phi_mag * np.cos(delta)
        )
        savepath = path + "/tmp"
        Path(savepath).mkdir()
        if verbose:
            print(f"Saving {len(self.frequencies)} files to {savepath}")
        for i, freq in enumerate(self.frequencies):
            np.savetxt(
                savepath + f"/{freq}.txt",
                np.column_stack(
                    (
                        theta,
                        phi,
                        E_mag[i],
                        theta_mag[i],
                        theta_phase[i],
                        phi_mag[i],
                        phi_phase[i],
                        ax_ratio[i],
                    )
                ),
                fmt="%.7f",
                header=(
                    "Theta [deg] Phi [deg] Abs(V) [V] Abs(Theta) [V]"
                    "Phase(Theta) [deg] Abs(Phi) [V] Phase(Phi) [deg]"
                    "Ax.Ratio []\n\n"
                ),
                comments="",
            )
        return savepath
    @staticmethod
    def _delete_txt(path: str, verbose: bool = False):
        """Delete the temporary txt files in ``path`` and remove the
        directory itself."""
        for f in Path(path).iterdir():
            assert f.suffix == ".txt"  # safety
            f.unlink()  # delete file
            if verbose:
                print("Deleting files.")
        Path(path).rmdir()
        if verbose:
            print(f"Remove directory {path}.")
    def to_uvbeam(
        self, beam_type: str = "E_field", verbose: bool = False
    ) -> uvbeam.UVBeam:
        """Export the beam as a pyuvdata UVBeam.

        beam_type: "power" or "E_field".
        Writes temporary CST-format txt files (one per frequency), reads
        them back with UVBeam.read_cst_beam, then deletes them.
        Raises ValueError for any other ``beam_type``.
        """
        uvb = uvbeam.UVBeam()
        if beam_type == "power":
            if verbose:
                print("Making UVBeam object from power beam.")
            txtpath = self._write_txt_power(verbose=verbose)
            txtfiles = [str(child) for child in Path(txtpath).iterdir()]
            # File names are "<freq in MHz>.txt"; recover Hz values.
            frequencies = [
                1e6 * float(Path(f).name[: -len(".txt")]) for f in txtfiles
            ]
            # Sort files and frequencies consistently, ascending in frequency.
            txtfiles = sorted(
                txtfiles, key=lambda x: frequencies[txtfiles.index(x)]
            )
            frequencies = sorted(frequencies)
            uvb.read_cst_beam(
                filename=txtfiles,
                beam_type="power",
                feed_pol="x",
                rotate_pol=False,
                frequency=frequencies,
                telescope_name="lusee-night",
                feed_name="lusee",
                feed_version="1.0",
                model_name="dipole_180",
                model_version="1.0",
                history="004",
                reference_impedance=50,
            )
            uvb.interpolation_function = "az_za_simple"
            self._delete_txt(txtpath, verbose=verbose)
        elif beam_type == "E_field":
            if verbose:
                print("Making UVBeam object from E-field beam.")
            txtpath = self._write_txt_Efield(pol="x", verbose=verbose)
            txtfiles = [str(child) for child in Path(txtpath).iterdir()]
            frequencies = [
                1e6 * float(Path(f).name[: -len(".txt")]) for f in txtfiles
            ]
            txtfiles = sorted(
                txtfiles, key=lambda x: frequencies[txtfiles.index(x)]
            )
            frequencies = sorted(frequencies)
            uvb.read_cst_beam(
                filename=txtfiles,
                beam_type="efield",
                feed_pol="x",
                rotate_pol=True,  # XXX
                frequency=frequencies,
                telescope_name="lusee-night",
                feed_name="lusee",
                feed_version="1.0",
                model_name="dipole_180",
                model_version="1.0",
                history="004",
                x_orientation="north",  # XXX
                reference_impedance=50,
            )
            uvb.interpolation_function = "az_za_simple"
            self._delete_txt(txtpath, verbose=verbose)
        else:
            raise ValueError("beam_type must be 'power' or 'E_field'")
        return uvb
| 36.563218
| 79
| 0.51116
|
acff259283e1eec5e8740c189d715e031471ec8c
| 1,613
|
py
|
Python
|
SR/model/SRCNN.py
|
AntonyYX/Super-Resolution
|
9a5a55169b08849be39a42f0ee955feb60527fbf
|
[
"MIT"
] | null | null | null |
SR/model/SRCNN.py
|
AntonyYX/Super-Resolution
|
9a5a55169b08849be39a42f0ee955feb60527fbf
|
[
"MIT"
] | null | null | null |
SR/model/SRCNN.py
|
AntonyYX/Super-Resolution
|
9a5a55169b08849be39a42f0ee955feb60527fbf
|
[
"MIT"
] | 1
|
2021-10-02T11:03:49.000Z
|
2021-10-02T11:03:49.000Z
|
import math
from torch import nn
import torch
from torch.nn.modules.activation import ReLU
from torchvision import transforms
from PIL import Image
class SRCNN(nn.Module):
    """Baseline SRCNN: a 9-5-5 convolutional stack with ReLU activations.

    Spatial dimensions are preserved via same-padding, so the output has
    the same shape as the input.
    """

    def __init__(self, in_channel: int = 3):
        super(SRCNN, self).__init__()
        layers = [
            nn.Conv2d(in_channel, 64, kernel_size=9, padding=4),
            nn.ReLU(True),
            nn.Conv2d(64, 32, kernel_size=5, padding=2),
            nn.ReLU(True),
            nn.Conv2d(32, in_channel, kernel_size=5, padding=2),
            nn.ReLU(True),
        ]
        self.body = nn.Sequential(*layers)

    def forward(self, inputs):
        """Run a batch through the conv stack; shape is unchanged."""
        return self.body(inputs)
class SRCNN_BN(nn.Module):
    """SRCNN variant with batch normalization after each hidden ReLU.

    Same 9-5-5 stack as SRCNN; same-padding keeps the spatial shape.
    """

    def __init__(self, in_channel: int = 3):
        super(SRCNN_BN, self).__init__()
        layers = [
            nn.Conv2d(in_channel, 64, kernel_size=9, padding=4),
            nn.ReLU(True),
            nn.BatchNorm2d(64),
            nn.Conv2d(64, 32, kernel_size=5, padding=2),
            nn.ReLU(True),
            nn.BatchNorm2d(32),
            nn.Conv2d(32, in_channel, kernel_size=5, padding=2),
            nn.ReLU(True),
        ]
        self.body = nn.Sequential(*layers)

    def forward(self, inputs):
        """Run a batch through the conv stack; shape is unchanged."""
        return self.body(inputs)
if __name__ == "__main__":
    # Smoke test: push a random image through SRCNN_BN and print the output
    # shape (same-padding means it should equal the input shape).
    model = SRCNN_BN(3)
    img = torch.rand((1, 3, 600, 600))
    print(model(img).shape)
| 29.87037
| 62
| 0.568506
|
acff2646f306c65a4e88af7192790ad7d069cbcd
| 1,456
|
py
|
Python
|
tools/upgrade/commands/tests/fixme_test.py
|
tahmidbintaslim/pyre-check
|
59d694f7733d7b160ecaddbe79d5d79cef0e83bc
|
[
"MIT"
] | null | null | null |
tools/upgrade/commands/tests/fixme_test.py
|
tahmidbintaslim/pyre-check
|
59d694f7733d7b160ecaddbe79d5d79cef0e83bc
|
[
"MIT"
] | null | null | null |
tools/upgrade/commands/tests/fixme_test.py
|
tahmidbintaslim/pyre-check
|
59d694f7733d7b160ecaddbe79d5d79cef0e83bc
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import unittest
from unittest.mock import MagicMock, patch
from ... import errors
from ...repository import Repository
from ..command import ErrorSuppressingCommand
from ..fixme import ErrorSource, Fixme
# Shared repository instance passed into every Fixme under test.
repository = Repository()
class FixmeTest(unittest.TestCase):
    """Tests for the pyre-upgrade Fixme command."""
    def test_run(self) -> None:
        """Fixme.run() should fetch errors from the configured source and
        hand them to _suppress_errors."""
        arguments = MagicMock()
        arguments.error_source = "stdin"
        mock_errors = MagicMock()
        # Case 1: errors read from stdin.
        with patch.object(
            errors.Errors, "from_stdin", return_value=mock_errors
        ) as errors_from_stdin, patch.object(
            ErrorSuppressingCommand, "_suppress_errors"
        ) as suppress_errors:
            Fixme(arguments, repository).run()
            errors_from_stdin.assert_called_once()
            suppress_errors.assert_called_once_with(mock_errors)
        # Case 2: errors generated by running pyre itself.
        arguments.error_source = ErrorSource.GENERATE
        arguments.lint = False
        with patch.object(
            Fixme, "_generate_errors", return_value=mock_errors
        ) as generate_errors, patch.object(
            ErrorSuppressingCommand, "_suppress_errors"
        ) as suppress_errors:
            Fixme(arguments, repository).run()
            generate_errors.assert_called_once()
            suppress_errors.assert_called_once_with(mock_errors)
| 30.978723
| 65
| 0.690934
|
acff268a61a189c8422649b9505d321356dc7c1c
| 643
|
py
|
Python
|
tests/test_logging.py
|
OSC-JYU/oscari-ES
|
e085a87df6cedd3bf6b5faa41b5c73a229329d33
|
[
"Apache-2.0"
] | null | null | null |
tests/test_logging.py
|
OSC-JYU/oscari-ES
|
e085a87df6cedd3bf6b5faa41b5c73a229329d33
|
[
"Apache-2.0"
] | null | null | null |
tests/test_logging.py
|
OSC-JYU/oscari-ES
|
e085a87df6cedd3bf6b5faa41b5c73a229329d33
|
[
"Apache-2.0"
] | null | null | null |
from .fixtures import elasticsearch
import pytest
def test_elasticsearch_logs_are_in_docker_logs(elasticsearch):
    """Node startup log lines should be visible via `docker logs`."""
    elasticsearch.assert_in_docker_log('o.e.n.Node')
    # eg. elasticsearch1    | [2017-07-04T00:54:22,604][INFO ][o.e.n.Node               ] [docker-test-node-1] initializing ...
def test_security_audit_logs_are_in_docker_logs(elasticsearch):
    """X-Pack security audit-trail entries should appear in `docker logs`."""
    elasticsearch.assert_in_docker_log('x.s.a.l.LoggingAuditTrail')
    # eg. elasticsearch1    | [2017-07-04T01:10:19,189][INFO ][o.e.x.s.a.l.LoggingAuditTrail] [transport] [access_granted]
def test_info_level_logs_are_in_docker_logs(elasticsearch):
    """INFO-level messages should be forwarded to `docker logs`."""
    elasticsearch.assert_in_docker_log('INFO')
| 37.823529
| 119
| 0.780715
|
acff26e4d02536bd4ce1beeca0599fcffbdefff8
| 800
|
py
|
Python
|
backend/user_profile/tests/test_schema.py
|
fusion44/btc-graphql
|
1b9da82df26aa3de8d81253d7fadd425181be6d6
|
[
"MIT"
] | null | null | null |
backend/user_profile/tests/test_schema.py
|
fusion44/btc-graphql
|
1b9da82df26aa3de8d81253d7fadd425181be6d6
|
[
"MIT"
] | null | null | null |
backend/user_profile/tests/test_schema.py
|
fusion44/btc-graphql
|
1b9da82df26aa3de8d81253d7fadd425181be6d6
|
[
"MIT"
] | null | null | null |
import pytest
from mixer.backend.django import mixer
from django.contrib.auth.models import AnonymousUser
from django.test import RequestFactory
from .. import schema
# We need to do this so that writing to the DB is possible in our tests.
pytestmark = pytest.mark.django_db
def test_user_type():
    """UserType should be constructible with no arguments."""
    instance = schema.UserType()
    assert instance, "Should instanciate a UserType object"
def test_resolve_current_user():
    """resolve_current_user returns None for anonymous requests and the
    authenticated user otherwise."""
    query = schema.Query()
    request = RequestFactory().get("/")
    # Unauthenticated request first.
    request.user = AnonymousUser()
    resolved = query.resolve_current_user(request)
    assert resolved is None, "Should return None if user is not authenticated"
    # Now attach a real user to the same request.
    user = mixer.blend("auth.User")
    request.user = user
    resolved = query.resolve_current_user(request)
    assert resolved == user, "Should return the current user if authenticated"
| 28.571429
| 73
| 0.73125
|
acff2706150d07319eaa26be017a5ec284e5f3f9
| 2,434
|
py
|
Python
|
venv/lib/python3.7/site-packages/diffoscope/comparators/gettext.py
|
crazyzete/AppSecAssignment2
|
a5520738e6c5924b94f69980eba49a565c2561d7
|
[
"MIT"
] | null | null | null |
venv/lib/python3.7/site-packages/diffoscope/comparators/gettext.py
|
crazyzete/AppSecAssignment2
|
a5520738e6c5924b94f69980eba49a565c2561d7
|
[
"MIT"
] | 1
|
2021-02-08T20:34:54.000Z
|
2021-02-08T20:34:54.000Z
|
venv/lib/python3.7/site-packages/diffoscope/comparators/gettext.py
|
crazyzete/AppSecAssignment2
|
a5520738e6c5924b94f69980eba49a565c2561d7
|
[
"MIT"
] | 1
|
2020-11-04T06:48:34.000Z
|
2020-11-04T06:48:34.000Z
|
# -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2014-2015 Jérémy Bobbio <lunar@debian.org>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.
import io
import re
import logging
from diffoscope.tools import tool_required
from diffoscope.difference import Difference
from .utils.file import File
from .utils.command import Command
logger = logging.getLogger(__name__)
class Msgunfmt(Command):
    """Run msgunfmt on a .mo file and re-encode its output to UTF-8.

    The output's charset is only known once the PO header's Content-Type
    line has been seen, so lines are buffered until the charset (or the
    end of the header) is found, then flushed transcoded to UTF-8.
    """
    # Matches the header line that declares the catalogue's charset.
    CHARSET_RE = re.compile(rb'^"Content-Type: [^;]+; charset=([^\\]+)\\n"$')
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Buffer for header lines seen before the charset is known.
        self._header = io.BytesIO()
        # Detected source encoding; None until determined.
        self._encoding = None
    @tool_required('msgunfmt')
    def cmdline(self):
        return ['msgunfmt', self.path]
    def filter(self, line):
        """Filter one output line, buffering until the charset is known."""
        if not self._encoding:
            self._header.write(line)
            # A blank line ends the header without a charset declaration.
            if line == b'\n':
                logger.debug(
                    "unable to determine PO encoding, let's hope it's utf-8"
                )
                self._encoding = 'utf-8'
                return self._header.getvalue()
            found = Msgunfmt.CHARSET_RE.match(line)
            if found:
                self._encoding = found.group(1).decode('us-ascii').lower()
                # Flush the whole buffered header transcoded to UTF-8.
                return (
                    self._header.getvalue()
                    .decode(self._encoding)
                    .encode('utf-8')
                )
            # Still inside the header: emit nothing yet.
            return b''
        if self._encoding != 'utf-8':
            return line.decode(self._encoding).encode('utf-8')
        else:
            return line
class MoFile(File):
    """Comparator for compiled gettext .mo catalogues (identified by file(1)
    type); differences are computed on the msgunfmt text output."""
    DESCRIPTION = "Gettext message catalogues"
    FILE_TYPE_RE = re.compile(r'^GNU message catalog\b')
    def compare_details(self, other, source=None):
        return [Difference.from_command(Msgunfmt, self.path, other.path)]
| 32.453333
| 77
| 0.63106
|
acff288df6eb578589cb685dfabc4e879d6d7998
| 1,580
|
py
|
Python
|
macs-to-names.py
|
srobo-legacy/comp-srcomp-kiosk
|
e084e90dddeeb40a15bf8e350dfc59ff14a161f3
|
[
"MIT"
] | null | null | null |
macs-to-names.py
|
srobo-legacy/comp-srcomp-kiosk
|
e084e90dddeeb40a15bf8e350dfc59ff14a161f3
|
[
"MIT"
] | null | null | null |
macs-to-names.py
|
srobo-legacy/comp-srcomp-kiosk
|
e084e90dddeeb40a15bf8e350dfc59ff14a161f3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
import os.path
# Input file listing one "ident mac page" entry per line.
FILE_NAME = 'pi_macs'
# Hostname pattern for each Pi: page plus a qualifier (ident or query string).
NAME_TEMPLATE = 'pi-{page}-{qual}.srobo'
# URL pattern; %{hiera(...)} is resolved later by Puppet/Hiera.
PAGE_TEMPLATE = 'http://%{{hiera(\'compbox_hostname\')}}/{page}.html{query}'
# Per-node Hiera YAML written for each Pi.
CONTENT_TEMPLATE = '''# Student Robotics Pi {ident}
---
url: {url}
hostname: {name}
'''
def tidy(lines):
    """Strip '#' comments and surrounding whitespace from each line,
    dropping any line that ends up empty."""
    cleaned = []
    for raw in lines:
        # Everything before the first '#' (or the whole line if none).
        text = raw.partition('#')[0].strip()
        if text:
            cleaned.append(text)
    return cleaned
def build_url(page):
    """Expand PAGE_TEMPLATE for a page spec that may carry a '?query' part."""
    parts = page.split('?')
    query = '' if len(parts) == 1 else '?' + parts[1]
    return PAGE_TEMPLATE.format(page=parts[0], query=query)
def build_name(ident, page):
    """Expand NAME_TEMPLATE; the qualifier is the ident, or the page's
    query string (commas removed) when one is present."""
    parts = page.split('?')
    if len(parts) == 1:
        qual = ident
    else:
        qual = parts[1].replace(',', '')
    return NAME_TEMPLATE.format(page=parts[0], qual=qual)
def build_filename(mac):
    """Return the per-node Hiera YAML path for a MAC address."""
    return os.path.join('hieradata', 'node', '{}.yaml'.format(mac))
# Read and clean the "ident mac page" entries.
with open(FILE_NAME, 'r') as fh:
    lines = tidy(fh.readlines())
names = []
for line in lines:
    ident, mac, page = line.split()
    name = build_name(ident, page)
    url = build_url(page)
    names.append(name)
    # Write the per-node Hiera YAML keyed by MAC address.
    fn = build_filename(mac)
    with open(fn, 'w+') as fh:
        fh.write(CONTENT_TEMPLATE.format(name=name, ident=ident, url=url))
# Emit the full hostname list, one per line.
with open('pi-names', mode='w') as f:
    print('\n'.join(names), file=f)
| 23.939394
| 76
| 0.611392
|
acff29436b5b950387c70eb921a5744334548e34
| 2,877
|
py
|
Python
|
markups/__init__.py
|
Hrissimir/pymarkups
|
4ece8169063f8acfb8557c6a0f4d929af0f434e9
|
[
"BSD-3-Clause"
] | null | null | null |
markups/__init__.py
|
Hrissimir/pymarkups
|
4ece8169063f8acfb8557c6a0f4d929af0f434e9
|
[
"BSD-3-Clause"
] | null | null | null |
markups/__init__.py
|
Hrissimir/pymarkups
|
4ece8169063f8acfb8557c6a0f4d929af0f434e9
|
[
"BSD-3-Clause"
] | null | null | null |
# This file is part of python-markups module
# License: 3-clause BSD, see LICENSE file
# Copyright: (C) Dmitry Shachnev, 2012-2021
from typing import List, Optional, Type
from markups.abstract import AbstractMarkup
from markups.markdown import MarkdownMarkup
from markups.restructuredtext import ReStructuredTextMarkup
from markups.textile import TextileMarkup
# Package version, exposed both as a tuple and a dotted string.
__version_tuple__ = (3, 1, 1)
__version__ = '.'.join(map(str, __version_tuple__))
# Markups shipped with this package (custom ones come via entry points).
builtin_markups = [MarkdownMarkup, ReStructuredTextMarkup, TextileMarkup]
# Public API
def get_all_markups() -> List[Type[AbstractMarkup]]:
    """
    :returns: list of all markups (both standard and custom ones)
    """
    try:
        from importlib.metadata import entry_points
    except ImportError:  # backport for older Python versions
        from importlib_metadata import entry_points
    try:
        # Python 3.10+ (or importlib_metadata 3.6+) supports group selection.
        eps = entry_points(group="pymarkups")
    except TypeError:
        # Older versions return a mapping keyed by group name.
        eps = entry_points()["pymarkups"]
    return [ep.load() for ep in eps]
def get_available_markups() -> List[Type[AbstractMarkup]]:
    """
    :returns: list of all available markups (markups whose
              :meth:`~markups.abstract.AbstractMarkup.available`
              method returns True)
    """
    return [markup for markup in get_all_markups() if markup.available()]
def get_markup_for_file_name(filename: str, return_class: bool = False):
    """
    :param filename: name of the file
    :param return_class: if true, this function will return
                         a class rather than an instance
    :returns: a markup with
              :attr:`~markups.abstract.AbstractMarkup.file_extensions`
              attribute containing extension of `filename`, if found,
              otherwise ``None``

    >>> import markups
    >>> markup = markups.get_markup_for_file_name('foo.mkd')
    >>> markup.convert('**Test**').get_document_body()
    '<p><strong>Test</strong></p>\\n'
    >>> markups.get_markup_for_file_name('bar.rst', return_class=True)
    <class 'markups.restructuredtext.ReStructuredTextMarkup'>
    """
    found = None
    for candidate in get_all_markups():
        # If several markups claim the same extension, the last one wins.
        if any(filename.endswith(extension)
               for extension in candidate.file_extensions):
            found = candidate
    if return_class:
        return found
    if found is not None and found.available():
        return found(filename=filename)
def find_markup_class_by_name(name: str) -> Optional[Type[AbstractMarkup]]:
    """
    :returns: a markup with
              :attr:`~markups.abstract.AbstractMarkup.name`
              attribute matching `name`, if found, otherwise ``None``

    >>> import markups
    >>> markups.find_markup_class_by_name('textile')
    <class 'markups.textile.TextileMarkup'>
    """
    wanted = name.lower()
    return next(
        (markup for markup in get_all_markups()
         if markup.name.lower() == wanted),
        None,
    )
| 33.453488
| 75
| 0.737226
|
acff29449a254d307f640f5f9cdd130751d76434
| 3,250
|
py
|
Python
|
baselines/common/mpi_adam_optimizer.py
|
mateuszkupper/baselines
|
0afd277690be927c7d02a3efef9793119115eceb
|
[
"MIT"
] | 23
|
2020-02-25T21:30:59.000Z
|
2022-03-31T00:09:14.000Z
|
baselines/common/mpi_adam_optimizer.py
|
mateuszkupper/baselines
|
0afd277690be927c7d02a3efef9793119115eceb
|
[
"MIT"
] | 5
|
2020-08-17T03:26:34.000Z
|
2022-03-19T11:34:54.000Z
|
baselines/common/mpi_adam_optimizer.py
|
mateuszkupper/baselines
|
0afd277690be927c7d02a3efef9793119115eceb
|
[
"MIT"
] | 5
|
2020-02-29T21:46:27.000Z
|
2021-11-03T07:30:16.000Z
|
import numpy as np
import tensorflow as tf
from baselines.common import tf_util as U
from baselines.common.tests.test_with_mpi import with_mpi
try:
from mpi4py import MPI
except ImportError:
MPI = None
class MpiAdamOptimizer(tf.train.AdamOptimizer):
    """Adam optimizer that averages gradients across mpi processes."""
    def __init__(self, comm, **kwargs):
        # comm: the MPI communicator to average gradients over.
        self.comm = comm
        tf.train.AdamOptimizer.__init__(self, **kwargs)
    def compute_gradients(self, loss, var_list, **kwargs):
        """Compute per-worker gradients, then replace them with the MPI
        Allreduce average across all workers."""
        grads_and_vars = tf.train.AdamOptimizer.compute_gradients(self, loss, var_list, **kwargs)
        # Drop variables with no gradient.
        grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
        # Flatten all gradients into one vector so a single Allreduce suffices.
        flat_grad = tf.concat([tf.reshape(g, (-1,)) for g, v in grads_and_vars], axis=0)
        shapes = [v.shape.as_list() for g, v in grads_and_vars]
        sizes = [int(np.prod(s)) for s in shapes]
        num_tasks = self.comm.Get_size()
        # Reusable receive buffer for the reduced gradient vector.
        buf = np.zeros(sum(sizes), np.float32)
        countholder = [0] # Counts how many times _collect_grads has been called
        stat = tf.reduce_sum(grads_and_vars[0][1]) # sum of first variable
        def _collect_grads(flat_grad, np_stat):
            # Sum across workers, then divide in place to get the mean.
            self.comm.Allreduce(flat_grad, buf, op=MPI.SUM)
            np.divide(buf, float(num_tasks), out=buf)
            # Periodically verify that variables are still in sync.
            if countholder[0] % 100 == 0:
                check_synced(np_stat, self.comm)
            countholder[0] += 1
            return buf
        avg_flat_grad = tf.py_func(_collect_grads, [flat_grad, stat], tf.float32)
        avg_flat_grad.set_shape(flat_grad.shape)
        # Split the averaged vector back into per-variable gradients.
        avg_grads = tf.split(avg_flat_grad, sizes, axis=0)
        avg_grads_and_vars = [(tf.reshape(g, v.shape), v)
                    for g, (_, v) in zip(avg_grads, grads_and_vars)]
        return avg_grads_and_vars
def check_synced(localval, comm=None):
    """
    It's common to forget to initialize your variables to the same values, or
    (less commonly) if you update them in some other way than adam, to get them out of sync.
    This function checks that variables on all MPI workers are the same, and raises
    an AssertionError otherwise

    Arguments:
        comm: MPI communicator (defaults to MPI.COMM_WORLD)
        localval: list of local variables (list of variables on current worker to be compared with the other workers)
    """
    comm = comm or MPI.COMM_WORLD
    # Gather every worker's value on rank 0; only rank 0 performs the check.
    vals = comm.gather(localval)
    if comm.rank == 0:
        assert all(val==vals[0] for val in vals[1:])
@with_mpi(timeout=5)
def test_nonfreeze():
    """Regression test: MpiAdamOptimizer steps must not hang when py_func
    triggers nested sess.run calls (the check_synced callback)."""
    np.random.seed(0)
    tf.set_random_seed(0)
    a = tf.Variable(np.random.randn(3).astype('float32'))
    b = tf.Variable(np.random.randn(2,5).astype('float32'))
    loss = tf.reduce_sum(tf.square(a)) + tf.reduce_sum(tf.sin(b))
    stepsize = 1e-2
    # for some reason the session config with inter_op_parallelism_threads was causing
    # nested sess.run calls to freeze
    config = tf.ConfigProto(inter_op_parallelism_threads=1)
    sess = U.get_session(config=config)
    update_op = MpiAdamOptimizer(comm=MPI.COMM_WORLD, learning_rate=stepsize).minimize(loss)
    sess.run(tf.global_variables_initializer())
    losslist_ref = []
    # 100 iterations so the periodic (every 100th step) sync check runs.
    for i in range(100):
        l,_ = sess.run([loss, update_op])
        print(i, l)
        losslist_ref.append(l)
| 41.139241
| 117
| 0.669846
|
acff294bbe7e99ab018025a4fac017c6ebb2999c
| 173
|
py
|
Python
|
rigid_opt/__init__.py
|
Algomorph/LevelSetFusion-Python
|
46625cd185da4413f9afaf201096203ee72d3803
|
[
"Apache-2.0"
] | 8
|
2019-01-30T19:01:25.000Z
|
2021-03-05T14:10:51.000Z
|
rigid_opt/__init__.py
|
Algomorph/LevelSetFusion-Python
|
46625cd185da4413f9afaf201096203ee72d3803
|
[
"Apache-2.0"
] | 58
|
2018-12-19T16:57:38.000Z
|
2019-06-06T19:52:36.000Z
|
rigid_opt/__init__.py
|
Algomorph/LevelSetFusion-Python
|
46625cd185da4413f9afaf201096203ee72d3803
|
[
"Apache-2.0"
] | 2
|
2019-03-06T06:30:30.000Z
|
2019-06-03T11:00:15.000Z
|
# ================================================================
# Created by Fei Shan on 01/23/19.
#
# ================================================================
| 43.25
| 67
| 0.138728
|
acff2a32ca42e6b6a5a2f13d999a68257739cf0c
| 9,233
|
py
|
Python
|
ls/joyous/tests/test_multiday_recurring_event.py
|
pure-creative/ls.joyous
|
f06caef889c1eecf9b784f69d189a206b7f34e14
|
[
"BSD-3-Clause"
] | 72
|
2018-03-16T16:35:08.000Z
|
2022-03-23T08:09:33.000Z
|
ls/joyous/tests/test_multiday_recurring_event.py
|
pure-creative/ls.joyous
|
f06caef889c1eecf9b784f69d189a206b7f34e14
|
[
"BSD-3-Clause"
] | 41
|
2018-03-25T20:36:52.000Z
|
2022-03-10T08:59:27.000Z
|
ls/joyous/tests/test_multiday_recurring_event.py
|
pure-creative/ls.joyous
|
f06caef889c1eecf9b784f69d189a206b7f34e14
|
[
"BSD-3-Clause"
] | 28
|
2018-08-13T22:36:09.000Z
|
2022-03-17T12:24:15.000Z
|
# ------------------------------------------------------------------------------
# Test Multiday Recurring Event Page
# ------------------------------------------------------------------------------
import sys
import datetime as dt
import pytz
import calendar
from django.test import TestCase, RequestFactory
from django.contrib.auth.models import User
from django.utils import timezone
from wagtail.core.models import Page, PageViewRestriction
from ls.joyous.utils.recurrence import Recurrence
from ls.joyous.utils.recurrence import DAILY, WEEKLY, YEARLY
from ls.joyous.utils.recurrence import SA, MO, TU, TH, FR
from ls.joyous.models import CalendarPage
from ls.joyous.models import MultidayRecurringEventPage, ExtraInfoPage
from .testutils import datetimetz, freeze_timetz
# ------------------------------------------------------------------------------
class Test(TestCase):
    """MultidayRecurringEventPage tests: a yearly 3-day "Team Retreat"
    running from 6pm on the first Friday of August to 4:30pm two days
    later."""
    def setUp(self):
        # A calendar page under the site home, plus the recurring event.
        self.user = User.objects.create_user('i', 'i@joy.test', 's3cr3t')
        self.calendar = CalendarPage(owner = self.user,
                                     slug = "events",
                                     title = "Events")
        Page.objects.get(slug='home').add_child(instance=self.calendar)
        self.calendar.save()
        self.calendar.save_revision().publish()
        self.event = MultidayRecurringEventPage(
            owner = self.user,
            slug = "team-retreat",
            title = "Team Retreat",
            repeat = Recurrence(dtstart=dt.date(2000,1,1),
                                freq=YEARLY,
                                bymonth=8,
                                byweekday=FR(1)),
            num_days = 3,
            time_from = dt.time(18),
            time_to = dt.time(16,30))
        self.calendar.add_child(instance=self.event)
        self.event.save_revision().publish()
    def testGetEventsByDay(self):
        # In Aug 2017 the event runs Fri 4th - Sun 6th: day 1 lists it in
        # days_events, days 2-3 in continuing_events.
        events = MultidayRecurringEventPage.events.byDay(dt.date(2017,8,1),
                                                         dt.date(2017,8,31))
        self.assertEqual(len(events), 31)
        evod = events[3]
        self.assertEqual(evod.date, dt.date(2017,8,4))
        self.assertEqual(len(evod.days_events), 1)
        self.assertEqual(len(evod.continuing_events), 0)
        evod = events[4]
        self.assertEqual(evod.date, dt.date(2017,8,5))
        self.assertEqual(len(evod.days_events), 0)
        self.assertEqual(len(evod.continuing_events), 1)
        evod = events[5]
        self.assertEqual(evod.date, dt.date(2017,8,6))
        self.assertEqual(len(evod.days_events), 0)
        self.assertEqual(len(evod.continuing_events), 1)
    def testStatus(self):
        # status_text is non-empty only while an occurrence is in progress.
        with freeze_timetz("2014-08-01 17:00:00"):
            self.assertEqual(self.event.status_text, "")
        with freeze_timetz("2014-08-02 13:00:00"):
            self.assertEqual(self.event.status_text, "This event has started.")
        with freeze_timetz("2014-08-03 15:00:00"):
            self.assertEqual(self.event.status_text, "This event has started.")
        with freeze_timetz("2014-08-03 17:00:00"):
            self.assertEqual(self.event.status_text, "")
    def testNextOn(self):
        # A weekly 2-day event that ended in 1982: next_date exists only
        # while frozen time is before the `until` date.
        request = RequestFactory().get("/test")
        request.user = self.user
        request.session = {}
        oldEvent = MultidayRecurringEventPage(
            owner = self.user,
            slug = "same-old-thing",
            title = "Same Ol'",
            repeat = Recurrence(dtstart=dt.date(1971,1,1),
                                until=dt.date(1982,1,1),
                                freq=WEEKLY,
                                byweekday=SA(1)),
            num_days = 2)
        self.calendar.add_child(instance=oldEvent)
        oldEvent.save_revision().publish()
        with freeze_timetz("1974-08-01 17:00:00"):
            self.assertEqual(oldEvent.next_date, dt.date(1974, 8, 3))
            self.assertEqual(oldEvent._nextOn(request), "Saturday 3rd of August ")
        with freeze_timetz("1982-01-01 17:00:00"):
            self.assertIsNone(oldEvent.next_date)
            self.assertEqual(oldEvent._nextOn(request), None)
    def testWhen(self):
        self.assertEqual(self.event.when,
                         "The first Friday of August for 3 days "
                         "starting at 6pm finishing at 4:30pm")
    def testAt(self):
        self.assertEqual(self.event.at.strip(), "6pm")
    @freeze_timetz("2035-04-03 10:00:00")
    def testPrevDate(self):
        self.assertEqual(self.event.prev_date, dt.date(2034, 8, 4))
    @freeze_timetz("2018-04-03 10:00:00")
    def testFutureExceptions(self):
        # An ExtraInfoPage overriding a future occurrence appears in
        # _futureExceptions with an auto-generated slug.
        request = RequestFactory().get("/test")
        request.user = self.user
        request.session = {}
        info2018 = ExtraInfoPage(owner = self.user,
                                 overrides = self.event,
                                 except_date = dt.date(2018, 8, 3),
                                 extra_title = "Team Retreat 2018",
                                 extra_information = "Weekend at Bernie's")
        self.event.add_child(instance=info2018)
        exceptions = self.event._futureExceptions(request)
        self.assertEqual(len(exceptions), 1)
        info = exceptions[0]
        self.assertEqual(info.slug, "2018-08-03-extra-info")
        self.assertEqual(info.extra_title, "Team Retreat 2018")
    @freeze_timetz("2018-08-04 02:00:00")
    def testPastExcludeExtraInfo(self):
        # With the 2018-08-03 occurrence overridden, _past_datetime_from
        # resolves to the 2017 occurrence -- presumably the extra-info
        # override excludes the 2018 one (see test name; TODO confirm).
        info2018 = ExtraInfoPage(owner = self.user,
                                 overrides = self.event,
                                 except_date = dt.date(2018, 8, 3),
                                 extra_title = "Team Retreat 2018",
                                 extra_information = "Weekend at Bernie's")
        self.event.add_child(instance=info2018)
        before = self.event._past_datetime_from
        self.assertEqual(before, datetimetz(2017, 8, 4, 18))
# ------------------------------------------------------------------------------
class Test40Day(TestCase):
    """A 40-day "Ice Festival" recurring from the fourth Monday of each
    December (2000-2020), so every occurrence spans the new year."""
    def setUp(self):
        self.user = User.objects.create_user('i', 'i@joy.test', 's3cr3t')
        self.calendar = CalendarPage(owner = self.user,
                                     slug = "events",
                                     title = "Events")
        Page.objects.get(slug='home').add_child(instance=self.calendar)
        self.calendar.save()
        self.calendar.save_revision().publish()
        self.event = MultidayRecurringEventPage(
            owner = self.user,
            slug = "ice-festival",
            title = "Ice Festival",
            repeat = Recurrence(dtstart=dt.date(2000,12,25),
                                until=dt.date(2020,1,31),
                                freq=YEARLY,
                                bymonth=12,
                                byweekday=MO(4)),
            num_days = 40)
        self.calendar.add_child(instance=self.event)
        self.event.save_revision().publish()
    def testGetEventsByDay(self):
        # The December 2019 occurrence is still running through January
        # 2020, so each listed day reports it as a continuing event.
        events = MultidayRecurringEventPage.events.byDay(dt.date(2020,1,1),
                                                         dt.date(2020,1,31))
        self.assertEqual(len(events), 31)
        evod = events[0]
        self.assertEqual(evod.date, dt.date(2020,1,1))
        self.assertEqual(len(evod.days_events), 0)
        self.assertEqual(len(evod.continuing_events), 1)
        thisEvent = evod.continuing_events[0]
        self.assertEqual(thisEvent.title, "Ice Festival")
        evod = events[10]
        self.assertEqual(evod.date, dt.date(2020,1,11))
        self.assertEqual(len(evod.continuing_events), 1)
        thisEvent = evod.continuing_events[0]
        self.assertEqual(thisEvent.title, "Ice Festival")
    def testStatus(self):
        with freeze_timetz("2019-12-20 13:00:00"):
            self.assertEqual(self.event.status_text, "")
        with freeze_timetz("2020-01-02 13:00:00"):
            self.assertEqual(self.event.status_text, "This event has started.")
        with freeze_timetz("2020-02-03 17:00:00"):
            self.assertEqual(self.event.status_text, "These events have finished.")
    def testAt(self):
        # No time_from was set, so the "at" text is empty.
        self.assertEqual(self.event.at.strip(), "")
    @freeze_timetz("2035-04-03 10:00:00")
    def testNextDate(self):
        # The recurrence ended in January 2020: no next occurrence.
        self.assertEqual(self.event.next_date, None)
    @freeze_timetz("2035-04-03 10:00:00")
    def testPrevDate(self):
        self.assertEqual(self.event.prev_date, dt.date(2019, 12, 23))
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
| 47.839378
| 83
| 0.514784
|
acff2cae9c9cbdd6437f6f3506c22bfce1ee5b20
| 4,631
|
py
|
Python
|
tests/test_oauth2_backends.py
|
dqfort/django-oauth-toolkit
|
492a867499b50f348c28db4ef3e429e8f46dc412
|
[
"BSD-2-Clause-FreeBSD"
] | 1,406
|
2018-04-09T18:46:01.000Z
|
2022-03-30T00:42:23.000Z
|
tests/test_oauth2_backends.py
|
dqfort/django-oauth-toolkit
|
492a867499b50f348c28db4ef3e429e8f46dc412
|
[
"BSD-2-Clause-FreeBSD"
] | 625
|
2018-04-08T06:06:29.000Z
|
2022-03-28T20:48:19.000Z
|
tests/test_oauth2_backends.py
|
dqfort/django-oauth-toolkit
|
492a867499b50f348c28db4ef3e429e8f46dc412
|
[
"BSD-2-Clause-FreeBSD"
] | 378
|
2018-04-11T20:08:11.000Z
|
2022-03-30T17:53:21.000Z
|
import json
import pytest
from django.test import RequestFactory, TestCase
from oauth2_provider.backends import get_oauthlib_core
from oauth2_provider.models import redirect_to_uri_allowed
from oauth2_provider.oauth2_backends import JSONOAuthLibCore, OAuthLibCore
try:
from unittest import mock
except ImportError:
import mock
@pytest.mark.usefixtures("oauth2_settings")
class TestOAuthLibCoreBackend(TestCase):
    """OAuthLibCore: server-class swapping and request-body extraction."""
    def setUp(self):
        self.factory = RequestFactory()
        self.oauthlib_core = OAuthLibCore()
    def test_swappable_server_class(self):
        # The OAUTH2_SERVER_CLASS setting controls which server class
        # OAuthLibCore instantiates.
        self.oauth2_settings.OAUTH2_SERVER_CLASS = mock.MagicMock
        oauthlib_core = OAuthLibCore()
        self.assertTrue(isinstance(oauthlib_core.server, mock.MagicMock))
    def test_form_urlencoded_extract_params(self):
        # Form-encoded bodies are passed through to oauthlib verbatim.
        payload = "grant_type=password&username=john&password=123456"
        request = self.factory.post("/o/token/", payload, content_type="application/x-www-form-urlencoded")
        uri, http_method, body, headers = self.oauthlib_core._extract_params(request)
        self.assertIn("grant_type=password", body)
        self.assertIn("username=john", body)
        self.assertIn("password=123456", body)
    def test_application_json_extract_params(self):
        # Plain OAuthLibCore does NOT translate JSON bodies into form
        # parameters (JSONOAuthLibCore does).
        payload = json.dumps(
            {
                "grant_type": "password",
                "username": "john",
                "password": "123456",
            }
        )
        request = self.factory.post("/o/token/", payload, content_type="application/json")
        uri, http_method, body, headers = self.oauthlib_core._extract_params(request)
        self.assertNotIn("grant_type=password", body)
        self.assertNotIn("username=john", body)
        self.assertNotIn("password=123456", body)
class TestCustomOAuthLibCoreBackend(TestCase):
    """
    Tests that the public API behaves as expected when we override
    the OAuthLibCoreBackend core methods.
    """
    class MyOAuthLibCore(OAuthLibCore):
        def _get_extra_credentials(self, request):
            # Sentinel value so the test can verify pass-through to oauthlib.
            return 1
    def setUp(self):
        self.factory = RequestFactory()
    def test_create_token_response_gets_extra_credentials(self):
        """
        Makes sure the extra_credentials parameter is passed to oauthlib.
        """
        payload = "grant_type=password&username=john&password=123456"
        request = self.factory.post("/o/token/", payload, content_type="application/x-www-form-urlencoded")
        with mock.patch("oauthlib.oauth2.Server.create_token_response") as create_token_response:
            mocked = mock.MagicMock()
            create_token_response.return_value = mocked, mocked, mocked
            core = self.MyOAuthLibCore()
            core.create_token_response(request)
            # extra_credentials is the fifth positional argument.
            self.assertTrue(create_token_response.call_args[0][4] == 1)
class TestJSONOAuthLibCoreBackend(TestCase):
    """JSONOAuthLibCore: JSON request bodies ARE flattened into
    form-style parameters for oauthlib."""
    def setUp(self):
        self.factory = RequestFactory()
        self.oauthlib_core = JSONOAuthLibCore()
    def test_application_json_extract_params(self):
        payload = json.dumps(
            {
                "grant_type": "password",
                "username": "john",
                "password": "123456",
            }
        )
        request = self.factory.post("/o/token/", payload, content_type="application/json")
        uri, http_method, body, headers = self.oauthlib_core._extract_params(request)
        # Each JSON key/value pair appears as a form parameter in the body.
        self.assertIn("grant_type=password", body)
        self.assertIn("username=john", body)
        self.assertIn("password=123456", body)
class TestOAuthLibCore(TestCase):
    def setUp(self):
        self.factory = RequestFactory()
    def test_validate_authorization_request_unsafe_query(self):
        # verify_request should not raise on query strings oauthlib
        # considers unsafe (e.g. a bare "next=/fake" parameter).
        auth_headers = {
            "HTTP_AUTHORIZATION": "Bearer " + "a_casual_token",
        }
        request = self.factory.get("/fake-resource?next=/fake", **auth_headers)
        oauthlib_core = get_oauthlib_core()
        oauthlib_core.verify_request(request, scopes=[])
@pytest.mark.parametrize(
    "uri, expected_result",
    # localhost is _not_ a loopback URI
    [
        ("http://localhost:3456", False),
        # only http scheme is supported for loopback URIs
        ("https://127.0.0.1:3456", False),
        ("http://127.0.0.1:3456", True),
        ("http://[::1]", True),
        ("http://[::1]:34", True),
    ],
)
def test_uri_loopback_redirect_check(uri, expected_result):
    """Loopback redirects: a literal loopback IP over http may use any
    port; https and the hostname "localhost" are rejected."""
    allowed_uris = ["http://127.0.0.1", "http://[::1]"]
    if expected_result:
        assert redirect_to_uri_allowed(uri, allowed_uris)
    else:
        assert not redirect_to_uri_allowed(uri, allowed_uris)
| 34.559701
| 107
| 0.664435
|
acff2d289bb483ed2d9715731f7aba93f0882d3a
| 6,151
|
py
|
Python
|
build/lib/PCV/imagesearch/imagesearch.py
|
Mastersam07/computer_vision_pcv
|
742c839156946335adc0fbf7868decced47a567e
|
[
"BSD-2-Clause"
] | 1
|
2019-03-07T14:33:36.000Z
|
2019-03-07T14:33:36.000Z
|
pcv/imagesearch/imagesearch.py
|
Mastersam07/computer_vision_pcv
|
742c839156946335adc0fbf7868decced47a567e
|
[
"BSD-2-Clause"
] | null | null | null |
pcv/imagesearch/imagesearch.py
|
Mastersam07/computer_vision_pcv
|
742c839156946335adc0fbf7868decced47a567e
|
[
"BSD-2-Clause"
] | null | null | null |
from numpy import *
import pickle
from sqlite3 import dbapi2 as sqlite
class Indexer(object):
    """Index images in an SQLite database by projecting feature
    descriptors onto a visual vocabulary ("bag of visual words")."""

    def __init__(self, db, voc):
        """ Initialize with the name of the database
            and a vocabulary object. """
        self.con = sqlite.connect(db)
        self.voc = voc

    def __del__(self):
        self.con.close()

    def db_commit(self):
        # Flush pending inserts to disk.
        self.con.commit()

    def get_id(self, imname):
        """ Get an entry id and add if not present. """
        # Parameterized queries avoid SQL injection / quote breakage for
        # arbitrary file names (the previous version used %-interpolation).
        cur = self.con.execute(
            "select rowid from imlist where filename=?", (imname,))
        res = cur.fetchone()
        if res is None:
            cur = self.con.execute(
                "insert into imlist(filename) values (?)", (imname,))
            return cur.lastrowid
        else:
            return res[0]

    def is_indexed(self, imname):
        """ Returns True if imname has been indexed. """
        im = self.con.execute(
            "select rowid from imlist where filename=?", (imname,)).fetchone()
        return im is not None

    def add_to_index(self, imname, descr):
        """ Take an image with feature descriptors,
            project on vocabulary and add to database. """
        if self.is_indexed(imname):
            return
        print('indexing', imname)
        # get the imid
        imid = self.get_id(imname)
        # get the words
        imwords = self.voc.project(descr)
        nbr_words = imwords.shape[0]
        # link each word to image; wordid is the word number itself
        for i in range(nbr_words):
            word = imwords[i]
            self.con.execute(
                "insert into imwords(imid,wordid,vocname) values (?,?,?)",
                (imid, word, self.voc.name))
        # store word histogram for image
        # use pickle to encode NumPy arrays as blobs
        self.con.execute(
            "insert into imhistograms(imid,histogram,vocname) values (?,?,?)",
            (imid, pickle.dumps(imwords), self.voc.name))

    def create_tables(self):
        """ Create the database tables. """
        self.con.execute('create table imlist(filename)')
        self.con.execute('create table imwords(imid,wordid,vocname)')
        self.con.execute('create table imhistograms(imid,histogram,vocname)')
        self.con.execute('create index im_idx on imlist(filename)')
        self.con.execute('create index wordid_idx on imwords(wordid)')
        self.con.execute('create index imid_idx on imwords(imid)')
        self.con.execute('create index imidhist_idx on imhistograms(imid)')
        self.db_commit()
class Searcher(object):
def __init__(self,db,voc):
""" Initialize with the name of the database. """
self.con = sqlite.connect(db)
self.voc = voc
def __del__(self):
self.con.close()
def get_imhistogram(self,imname):
""" Return the word histogram for an image. """
im_id = self.con.execute(
"select rowid from imlist where filename='%s'" % imname).fetchone()
s = self.con.execute(
"select histogram from imhistograms where rowid='%d'" % im_id).fetchone()
# use pickle to decode NumPy arrays from string
return pickle.loads(str(s[0]))
def candidates_from_word(self,imword):
""" Get list of images containing imword. """
im_ids = self.con.execute(
"select distinct imid from imwords where wordid=%d" % imword).fetchall()
return [i[0] for i in im_ids]
def candidates_from_histogram(self,imwords):
""" Get list of images with similar words. """
# get the word ids
words = imwords.nonzero()[0]
# find candidates
candidates = []
for word in words:
c = self.candidates_from_word(word)
candidates+=c
# take all unique words and reverse sort on occurrence
tmp = [(w,candidates.count(w)) for w in set(candidates)]
tmp.sort(cmp=lambda x,y:cmp(x[1],y[1]))
tmp.reverse()
# return sorted list, best matches first
return [w[0] for w in tmp]
def query(self,imname):
""" Find a list of matching images for imname. """
h = self.get_imhistogram(imname)
candidates = self.candidates_from_histogram(h)
matchscores = []
for imid in candidates:
# get the name
cand_name = self.con.execute(
"select filename from imlist where rowid=%d" % imid).fetchone()
cand_h = self.get_imhistogram(cand_name)
cand_dist = sqrt( sum( self.voc.idf*(h-cand_h)**2 ) )
matchscores.append( (cand_dist,imid) )
# return a sorted list of distances and database ids
matchscores.sort()
return matchscores
def get_filename(self,imid):
""" Return the filename for an image id. """
s = self.con.execute(
"select filename from imlist where rowid='%d'" % imid).fetchone()
return s[0]
def tf_idf_dist(voc, v1, v2):
    """idf-weighted Euclidean distance between two L1-normalized word
    histograms.  NOTE: v1 and v2 are normalized in place (augmented
    assignment mutates NumPy arrays), matching the original behaviour."""
    v1 /= sum(v1)
    v2 /= sum(v2)
    diff = v1 - v2
    return sqrt(sum(voc.idf * diff ** 2))
def compute_ukbench_score(src, imlist):
    """ Returns the average number of correct
        images on the top four results of queries.

    Images are grouped four at a time (ukbench convention), so result id
    r for query i counts as correct when r // 4 == i // 4. """
    nbr_images = len(imlist)
    pos = zeros((nbr_images, 4))
    # record the (0-based) database ids of the top four hits per query
    for query_idx in range(nbr_images):
        top_four = src.query(imlist[query_idx])[:4]
        pos[query_idx] = [hit[1] - 1 for hit in top_four]
    # 1.0 per hit that lands in the query's group of four, averaged
    hits = array([(pos[query_idx] // 4) == (query_idx // 4)
                  for query_idx in range(nbr_images)]) * 1.0
    return sum(hits) / (nbr_images)
# import PIL and pylab for plotting
from PIL import Image
from pylab import *
def plot_results(src, res):
    """ Show images in result list 'res': one row of subplots, one image
    per database id, axes hidden. """
    figure()
    count = len(res)
    for position, imid in enumerate(res):
        fname = src.get_filename(imid)
        subplot(1, count, position + 1)
        imshow(array(Image.open(fname)))
        axis('off')
    show()
| 32.544974
| 135
| 0.577142
|
acff2d2bf397078e6ab597118cfe588cf3d4e011
| 206
|
py
|
Python
|
cv/urls.py
|
ezraermy/mkcv
|
a75ec4144b313d1f92795da582d988634cd4ac7c
|
[
"MIT"
] | null | null | null |
cv/urls.py
|
ezraermy/mkcv
|
a75ec4144b313d1f92795da582d988634cd4ac7c
|
[
"MIT"
] | null | null | null |
cv/urls.py
|
ezraermy/mkcv
|
a75ec4144b313d1f92795da582d988634cd4ac7c
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
# Route table for the cv app:
#   ""         -> landing page
#   "apply/"   -> CV application form
#   "success/" -> confirmation page shown after a submission
urlpatterns = [
    path("", views.home, name='home'),
    path('apply/', views.apply, name='apply'),
    path('success/', views.success, name='success'),
]
| 25.75
| 52
| 0.645631
|
acff2e5b2e0d321bda319b42652899fea5278878
| 9,366
|
py
|
Python
|
test/command_line/test_search_beam_position.py
|
TiankunZhou/dials
|
bd5c95b73c442cceb1c61b1690fd4562acf4e337
|
[
"BSD-3-Clause"
] | 2
|
2021-03-17T11:25:46.000Z
|
2021-11-18T04:20:54.000Z
|
test/command_line/test_search_beam_position.py
|
TiankunZhou/dials
|
bd5c95b73c442cceb1c61b1690fd4562acf4e337
|
[
"BSD-3-Clause"
] | null | null | null |
test/command_line/test_search_beam_position.py
|
TiankunZhou/dials
|
bd5c95b73c442cceb1c61b1690fd4562acf4e337
|
[
"BSD-3-Clause"
] | null | null | null |
import glob
import os
import pytest
import scitbx
from cctbx import uctbx
from dxtbx.model import ExperimentList
from dxtbx.serialize import load
from dials.algorithms.indexing.test_index import run_indexing
from dials.command_line import search_beam_position
def test_search_i04_weak_data_image_range(mocker, run_in_tmpdir, dials_regression):
    """Perform a beam-centre search and check that the output is sane."""
    data_dir = os.path.join(dials_regression, "indexing_test_data", "i04_weak_data")
    reflection_file = os.path.join(data_dir, "full.pickle")
    experiments_file = os.path.join(data_dir, "experiments_import.json")
    # Restrict the search to three sampled image ranges and run four
    # macro-cycles of refinement.
    args = [
        experiments_file,
        reflection_file,
        "image_range=1,10",
        "image_range=251,260",
        "image_range=531,540",
        "n_macro_cycles=4",
    ]
    from rstbx.indexing_api import dps_extended
    mocker.spy(dps_extended, "get_new_detector")
    search_beam_position.run(args)
    # Check that the last call to get_new_detector was with an offset of close to zero.
    # The final call was to apply the "best" shift to the detector model before
    # returning the updated experiments.
    assert dps_extended.get_new_detector.call_args[0][1].elems == pytest.approx(
        (0, 0, 0), abs=3e-2
    )
    assert os.path.exists("optimised.expt")
    # Compare the shifts between the start and final detector models
    experiments = load.experiment_list(experiments_file, check_format=False)
    optimised_experiments = load.experiment_list("optimised.expt", check_format=False)
    detector_1 = experiments[0].detector
    detector_2 = optimised_experiments[0].detector
    shift = scitbx.matrix.col(detector_1[0].get_origin()) - scitbx.matrix.col(
        detector_2[0].get_origin()
    )
    assert shift.elems == pytest.approx((0.27, -0.12, 0.0), abs=1e-1)
def test_search_multiple(run_in_tmpdir, dials_regression):
    """Perform a beam-centre search and check that the output is sane.

    Do the following:
    1. Run dials.search_beam_centre on two datablocks and two pickled
       reflection tables, as output by dials.find_spots;
       a) Check that the program exits correctly;
       b) Check that it produces the expected output datablock.
    2. Check that the beam centre search has resulted in the expected shift
       in detector origin.
    """
    data_dir = os.path.join(dials_regression, "indexing_test_data", "trypsin")
    pickle_path1 = os.path.join(data_dir, "strong_P1_X6_1_0-1.pickle")
    pickle_path2 = os.path.join(data_dir, "strong_P1_X6_2_0-1.pickle")
    experiments_path1 = os.path.join(data_dir, "datablock_P1_X6_1.json")
    experiments_path2 = os.path.join(data_dir, "datablock_P1_X6_2.json")
    args = [experiments_path1, experiments_path2, pickle_path1, pickle_path2]
    search_beam_position.run(args)
    assert os.path.exists("optimised.expt")
    # The first experiment's detector origin should shift by a known amount.
    experiments = load.experiment_list(experiments_path1, check_format=False)
    optimised_experiments = load.experiment_list("optimised.expt", check_format=False)
    detector_1 = experiments[0].detector
    detector_2 = optimised_experiments[0].detector
    shift = scitbx.matrix.col(detector_1[0].get_origin()) - scitbx.matrix.col(
        detector_2[0].get_origin()
    )
    assert shift.elems == pytest.approx((-0.518, 0.192, 0.0), abs=1e-1)
def test_index_after_search(dials_data, run_in_tmpdir):
    """Integrate the beam centre search with the rest of the toolchain.

    Do the following:
    1. Take a known good experiment and perturbate the beam centre
    2. Run dials.search_beam_centre on the perturbated beam centre and original
       reflection table, check for expected output;
    3. Run dials.index with the found beam centre and check that the expected
       unit cell is obtained and that the RMSDs are smaller than or equal to some
       expected values."""
    insulin = dials_data("insulin_processed")
    # load the original experiment and perturbate the beam centre by a small offset
    experiments = load.experiment_list(insulin / "imported.expt", check_format=False)
    original_origin = experiments[0].detector.hierarchy().get_origin()
    shifted_origin = (
        original_origin[0] - 1.3,
        original_origin[1] + 1.5,
        original_origin[2],
    )
    experiments[0].detector.hierarchy().set_local_frame(
        experiments[0].detector.hierarchy().get_fast_axis(),
        experiments[0].detector.hierarchy().get_slow_axis(),
        shifted_origin,
    )
    assert experiments[0].detector.hierarchy().get_origin() == shifted_origin
    experiments.as_file(run_in_tmpdir / "shifted.expt")
    # search the beam centre
    search_beam_position.run(
        [
            run_in_tmpdir.join("shifted.expt").strpath,
            insulin.join("strong.refl").strpath,
        ]
    )
    assert os.path.exists("optimised.expt")
    # check we can actually index the resulting optimized experiments
    expected_unit_cell = uctbx.unit_cell(
        (67.655, 67.622, 67.631, 109.4583, 109.4797, 109.485)
    )
    expected_rmsds = (0.3, 0.3, 0.005)
    expected_hall_symbol = " P 1"
    run_indexing(
        insulin / "strong.refl",
        run_in_tmpdir / "optimised.expt",
        run_in_tmpdir,
        [],
        expected_unit_cell,
        expected_rmsds,
        expected_hall_symbol,
    )
def test_search_single(run_in_tmpdir, dials_regression):
    """Perform a beam-centre search and check that the output is sane.

    Do the following:
    1. Run dials.search_beam_centre on a single datablock and pickled
       reflection table, as output by dials.find_spots;
       a) Check that the program exits correctly;
       b) Check that it produces the expected output datablock.
    2. Check that the beam centre search has resulted in the expected shift
       in detector origin.
    """
    data_dir = os.path.join(dials_regression, "indexing_test_data", "phi_scan")
    pickle_path = os.path.join(data_dir, "strong.pickle")
    experiments_path = os.path.join(data_dir, "datablock.json")
    search_beam_position.run([experiments_path, pickle_path])
    assert os.path.exists("optimised.expt")
    # Compare the original detector origin with the optimised one.
    experiments = load.experiment_list(experiments_path, check_format=False)
    original_imageset = experiments.imagesets()[0]
    optimized_experiments = load.experiment_list("optimised.expt", check_format=False)
    detector_1 = original_imageset.get_detector()
    detector_2 = optimized_experiments.detectors()[0]
    shift = scitbx.matrix.col(detector_1[0].get_origin()) - scitbx.matrix.col(
        detector_2[0].get_origin()
    )
    assert shift.elems == pytest.approx((-0.976, 2.497, 0.0), abs=1e-1)
def test_search_small_molecule(dials_data, run_in_tmpdir):
    """Perform a beam-centre search on a multi-sequence data set.

    Do the following:
    1. Run dials.search_beam_centre on a single datablock and pickled
       reflection table containing multiple experiment IDs, as output by
       dials.find_spots;
       a) Check that the program exits correctly;
       b) Check that it produces the expected output datablock.
    2. Check that the beam centre search has resulted in the expected shift
       in detector origin.
    """
    data = dials_data("l_cysteine_dials_output")
    experiments_path = data.join("imported.expt").strpath
    refl_path = data.join("strong.refl").strpath
    search_beam_position.run([experiments_path, refl_path])
    assert os.path.exists("optimised.expt")
    experiments = load.experiment_list(experiments_path, check_format=False)
    optimised_experiments = load.experiment_list("optimised.expt", check_format=False)
    for old_expt, new_expt in zip(experiments, optimised_experiments):
        # assert that the detector fast/slow axes are unchanged from the input experiments
        # the last experiment actually does have a different detector model
        assert (
            old_expt.detector[0].get_slow_axis() == new_expt.detector[0].get_slow_axis()
        )
        assert (
            old_expt.detector[0].get_fast_axis() == new_expt.detector[0].get_fast_axis()
        )
        # Every experiment's origin should move by the same known offset.
        shift = scitbx.matrix.col(
            old_expt.detector[0].get_origin()
        ) - scitbx.matrix.col(new_expt.detector[0].get_origin())
        assert shift.elems == pytest.approx((0.091, -1.11, 0), abs=1e-2)
def test_multi_sweep_fixed_rotation(dials_regression, run_in_tmpdir):
    """Beam-centre search over two sweeps; each experiment's detector
    origin should end up shifted by the same known offset."""
    data_dir = os.path.join(dials_regression, "indexing_test_data", "multi_sweep")
    reflection_files = sorted(
        glob.glob(os.path.join(data_dir, "SWEEP[1,2]", "index", "*_strong.pickle"))
    )
    experiment_files = sorted(
        glob.glob(
            os.path.join(data_dir, "SWEEP[1,2]", "index", "*_datablock_import.json")
        )
    )
    search_beam_position.run(reflection_files + experiment_files)
    assert os.path.exists("optimised.expt")
    # Collect the input experiments in the same order they were passed in.
    experiments = ExperimentList()
    for path in experiment_files:
        experiments.extend(load.experiment_list(path, check_format=False))
    optimised_experiments = load.experiment_list("optimised.expt", check_format=False)
    for orig_expt, new_expt in zip(experiments, optimised_experiments):
        shift = scitbx.matrix.col(
            orig_expt.detector[0].get_origin()
        ) - scitbx.matrix.col(new_expt.detector[0].get_origin())
        print(shift)
        assert shift.elems == pytest.approx((2.293, -0.399, 0), abs=1e-2)
| 40.37069
| 90
| 0.710869
|
acff2faf63276973cc7afedea0750e7f2d760900
| 183
|
py
|
Python
|
q3/q3/config.py
|
virtimus/makaronLab
|
10b9be7d7d65d3da6219f929ea7070dd5fed3a81
|
[
"0BSD"
] | 2
|
2021-03-16T05:48:36.000Z
|
2021-10-11T01:55:48.000Z
|
q3/q3/config.py
|
virtimus/makaronLab
|
10b9be7d7d65d3da6219f929ea7070dd5fed3a81
|
[
"0BSD"
] | null | null | null |
q3/q3/config.py
|
virtimus/makaronLab
|
10b9be7d7d65d3da6219f929ea7070dd5fed3a81
|
[
"0BSD"
] | 1
|
2021-03-16T05:48:39.000Z
|
2021-03-16T05:48:39.000Z
|
# Handle to the api/script console instance; None until boot.py assigns it
# during app startup / editorFrame init.
consoleInstance = None
# Console widget instance; likewise populated by boot.py at startup.
consoleWidgetInstance = None
| 26.142857
| 94
| 0.819672
|
acff2fff1672cf13d1607521643d880fffb5bb51
| 438
|
py
|
Python
|
src/sofastorage/key.py
|
SlumberDemon/SofaStorage
|
6de843688d70cb6ed39fb83cc42017572445bb30
|
[
"MIT"
] | null | null | null |
src/sofastorage/key.py
|
SlumberDemon/SofaStorage
|
6de843688d70cb6ed39fb83cc42017572445bb30
|
[
"MIT"
] | null | null | null |
src/sofastorage/key.py
|
SlumberDemon/SofaStorage
|
6de843688d70cb6ed39fb83cc42017572445bb30
|
[
"MIT"
] | null | null | null |
"""
[!] DISCLAIMER:
This is the public key to identify the root database.
Do not change this key unless you have a private key.
To get your own key please head to the following link:
https://deta.sh & create a new project then use that key.
Note: However no one will be able to get data only with this key.
They have to know the drive password in order to interact with the drive .
"""
KEY = "a0lrdx1u_SEo6cFew8Vy2hUiecp2DHfPbiwkV3gYG"
| 33.692308
| 74
| 0.760274
|
acff301a567a1f8a2b43c249c66538ad3cee7691
| 4,859
|
py
|
Python
|
Training_Data/randomness_test.py
|
rainonej/rock_paper_scissors
|
2904b3b506cd0a33fff6ba128859b2e762c83370
|
[
"MIT"
] | null | null | null |
Training_Data/randomness_test.py
|
rainonej/rock_paper_scissors
|
2904b3b506cd0a33fff6ba128859b2e762c83370
|
[
"MIT"
] | null | null | null |
Training_Data/randomness_test.py
|
rainonej/rock_paper_scissors
|
2904b3b506cd0a33fff6ba128859b2e762c83370
|
[
"MIT"
] | null | null | null |
import numpy as np
#import pickle
#A = np.loadtxt("Rock_Paper_Scissors_Raw.txt", dtype = list, comments = '#', delimiter = ',', usecols = (0,1,2,3))
#A = np.loadtxt("Rock_Paper_Scissors_Raw.txt", dtype = int, comments = '#', delimiter = ',', usecols = (2), ndmin = 1)
#B = np.loadtxt("Rock_Paper_Scissors_Raw.txt", dtype = int, comments = '#', delimiter = ',', usecols = (3), ndmin = 1)
#A = np.genfromtxt("Rock_Paper_Scissors_Raw.txt", dtype = int, comments = '#', delimiter = ',', usecols = (2), ndmin = 1)
#with open('Rock_Paper_Scissors_Raw.pkl', 'wb') as output:
# pickle.dump(A, output, pickle.HIGHEST_PROTOCOL)
'''The data is now in a pkl file, stored as a list of lists. Each row looks like this:
[game_id,game_round_id,player_one_throw,player_two_throw]
We don't know what 1, 2, and 3 represent. Or even which one beats which.
But presumebly 0 means the contestant didn't enter a input, thus walks away.
They can also both walk away at the same time.
We do know that game_rounds can't end in a tie, and a game lasts until there are 3 wins, or someone walks away.'''
def get_frequency(data, options):
    """Return the relative frequency of each option in *data*.

    ``data`` must contain only values from ``options``; any other value
    raises KeyError (same as the original behaviour)."""
    total = len(data)
    freq = dict.fromkeys(options, 0)  # label each option with a zero count
    for value in data:                # tally every observation
        freq[value] += 1
    for opt in options:               # convert counts to fractions
        freq[opt] = freq[opt] / total
    return freq
def process_data(data, n):
    """Build n-gram histories of throws from raw (game_id, throw) rows.

    :param data: 2-dim list; column 0 is the game id, column 1 the throw for
        that round (0 means the player gave no input and the row is skipped).
    :param n: longest history length to collect.
    :returns: list of n lists; element 0 holds every single throw in order,
        and element k-1 holds every consecutive k-throw window that falls
        entirely within one game.
    """
    processed_data = []
    for _ in range(n):
        processed_data.append([])
    skip = 0  # the only value to skip ("walked away")
    L = len(data)  # don't want to calculate this a lot
    # game_id mirrors the id of the game currently being scanned;
    # `round` counts only the non-skipped rounds seen so far in that game.
    game_id = 0
    round = 0
    for i in range(L):
        if data[i][0] != game_id:
            # BUG FIX: record the actual game id. The original did
            # `game_id += 1`, which loses sync whenever ids are
            # non-consecutive and then wrongly resets `round` on every row.
            # get_multi_frequency() in this file already uses this form.
            game_id = data[i][0]
            round = 0
        if data[i][1] != skip:
            round += 1
            # singleton history first
            processed_data[0].append(data[i][1])
            for j in range(2, n + 1):
                if round >= j:
                    # last j throws of the current game, as a fresh list
                    # (equivalent to the original's [-j:-1] + [last] splice)
                    processed_data[j - 1].append(processed_data[0][-j:])
    return processed_data
from itertools import product
def prod(set, k):
    """Return every ordered selection (with repetition) of k elements
    drawn from the given collection, as a list of tuples."""
    return list(product(set, repeat=k))
def get_multi_frequency(data, n):
    '''Count occurrences of every 1- to n-gram of throws in the raw data.

    Input: a 2-dim list of numbers; first column is the game id, second is
    the output (throw) for each round, where 0 means the round is skipped.
    Returns (processed_data, frequency): processed_data[0] is the flat list
    of throws, and frequency[k-1] maps each possible k-tuple of throws to
    its count, plus a running 'total' of all k-grams seen.
    '''
    processed_data =[] #creates the empty data
    for i in range(0,n):
        processed_data.append([])
    options = [1,2,3] #the possible options
    # multi_options[k-1] enumerates every possible k-tuple over the options
    multi_options = []
    for i in range(1,n+1):
        multi_options += [prod(options, i)]
    # create the frequency table: one dict per n-gram size, zero-initialised
    frequency = []
    for i in range(0,n):
        frequency.append({})
        for opt in multi_options[i]:
            frequency[-1][opt] = 0
        frequency[-1]['total'] = 0
    skip = 0 #the only value to skip
    L = len(data) #don't want to calculate this a lot
    # Counters: game_id mirrors the id of the game being scanned;
    # `round` counts non-skipped rounds within that game (not the raw round id)
    game_id = 0 #should reflect the actual game_id
    round = 0 #should not be the actual round
    for i in range(0,L):
        if (data[i][0] != game_id):
            # new game: n-gram windows must not straddle game boundaries
            game_id = data[i][0]
            round = 0
        if (data[i][1] != skip):
            round += 1
            #do the singleton first
            frequency[0][tuple([data[i][1]])] += 1
            frequency[0]['total'] += 1
            processed_data[0].append(data[i][1])
            for j in range(2,n+1):
                if (round >= j):
                    # the last j throws of this game form one j-gram
                    frequency[j-1][ tuple(processed_data[0][-j:-1] + [processed_data[0][-1]]) ] += 1
                    frequency[j-1]['total'] +=1
    return (processed_data,frequency)
#An = np.loadtxt("Rock_Paper_Scissors_Raw.txt", dtype = int, comments = '#', delimiter = ',', usecols = (0,2), ndmin = 2)
# Load the first 5000 rows of (game_id, player_one_throw) pairs from the raw log.
An = np.genfromtxt("Rock_Paper_Scissors_Raw.txt", dtype = int, comments = '#', delimiter = ',', usecols = (0,2), max_rows = 5000)
#Am = An[0:100]
An = An.tolist()
#new_data = process_data(Am, 2)
#print(new_data)
# Tally 1- through 4-gram frequencies of the observed throws and print them.
(A,B) = get_multi_frequency(An, 4)
#print(A)
print(B)
'''
C = get_frequency(A, [1,2,3, 0])
print(C)
D = get_frequency(B, [1,2,3,0])
print(D)
'''
#This shows that Humans (or wherever this data came from) is not a uniform random distribution
| 29.993827
| 129
| 0.663305
|
acff3187483d88de45c658726bd27c3fb4b15a5a
| 1,008
|
py
|
Python
|
py/interview/PopOfStack.py
|
shhuan/algorithms
|
2830c7e2ada8dfd3dcdda7c06846116d4f944a27
|
[
"MIT"
] | null | null | null |
py/interview/PopOfStack.py
|
shhuan/algorithms
|
2830c7e2ada8dfd3dcdda7c06846116d4f944a27
|
[
"MIT"
] | null | null | null |
py/interview/PopOfStack.py
|
shhuan/algorithms
|
2830c7e2ada8dfd3dcdda7c06846116d4f944a27
|
[
"MIT"
] | 1
|
2022-03-09T04:52:55.000Z
|
2022-03-09T04:52:55.000Z
|
# -*- coding: utf-8 -*-
"""
Microsoft
给一个栈的入栈序列,判断另外一个序列是否是它的出栈序列
Sample Input
1 2 3; 2 1 3
Sample Output
True
分析:
直接模拟
"""
__author__ = 'huangshuangquan'
def isPopSeq(pushArray, popArray):
    """Return True iff popArray is a valid pop sequence of a stack that is
    fed pushArray in order (classic stack-simulation check).

    Empty inputs and length mismatches are rejected outright.
    """
    if not pushArray or not popArray:
        return False
    # Sequences of different lengths can never correspond.
    if len(pushArray) != len(popArray):
        return False
    simulated = []
    pop_pos = 0
    for value in pushArray:
        simulated.append(value)
        # Greedily pop whenever the stack top matches the next expected value.
        while simulated and simulated[-1] == popArray[pop_pos]:
            simulated.pop()
            pop_pos += 1
    # A fully drained stack means every pop was matched in order.
    return not simulated
if __name__ == "__main__":
    push_seq = [1, 2, 3]
    # Exercise several candidate pop orders, valid and invalid alike,
    # printing the verdict for each (same order as the original checks).
    for candidate in ([1, 3, 2], [3, 1, 2], [1, 2, 3], [2, 1, 3], [3, 1, 3]):
        print(isPopSeq(push_seq, candidate))
| 18.327273
| 56
| 0.608135
|
acff329020d914c8b361bd9504b1d6120d79fabb
| 11,833
|
py
|
Python
|
modules/s3/s3summary.py
|
waidyanatha/deprecated.sambro-eden
|
62e180703a2f16d5f8fcd532335d8287b76a8175
|
[
"MIT"
] | 1
|
2016-12-22T09:31:22.000Z
|
2016-12-22T09:31:22.000Z
|
modules/s3/s3summary.py
|
gurlinthewurld/eden
|
726aea55c95ee33f48dace63f76496e22e529157
|
[
"MIT"
] | null | null | null |
modules/s3/s3summary.py
|
gurlinthewurld/eden
|
726aea55c95ee33f48dace63f76496e22e529157
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
""" Resource Summary Pages
@copyright: 2013 (c) Sahana Software Foundation
@license: MIT
@requires: U{B{I{gluon}} <http://web2py.com>}
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from gluon import *
from gluon.storage import Storage
from s3filter import S3FilterForm
from s3gis import MAP
from s3rest import S3Method
# =============================================================================
class S3Summary(S3Method):
    """ Resource Summary Pages: renders a tabbed page of widgets (tables,
        maps, reports) for a resource, plus a shared filter form. """

    # -------------------------------------------------------------------------
    def apply_method(self, r, **attr):
        """
            Entry point for REST interface

            @param r: the S3Request
            @param attr: controller attributes
        """

        if "w" in r.get_vars:
            # Ajax-request for a specific widget
            return self.ajax(r, **attr)
        else:
            # Full page request
            # @todo: check for proper format + method
            return self.summary(r, **attr)

    # -------------------------------------------------------------------------
    def summary(self, r, **attr):
        """
            Render the summary page

            @param r: the S3Request
            @param attr: controller attributes
        """

        output = {}

        resource = self.resource
        get_config = resource.get_config

        # Get Summary Page Configuration
        config = self._get_config(resource)

        # Page title
        crud_string = self.crud_string
        title = crud_string(self.tablename, "title_list")
        output["title"] = title

        # Tabs
        tablist = UL()
        sections = []
        commons = []

        # Active tab (?t=<index> selects which tab renders immediately)
        if "t" in r.get_vars:
            active_tab = int(r.get_vars["t"])
        else:
            active_tab = 0
        active_map = None

        # Render sections
        tab_idx = 0
        widget_idx = 0
        targets = []
        # "pending" collects widget ids that are hidden on initial load and
        # must be Ajax-initialized when their tab is first opened
        pending = []

        for section in config:

            common = section.get("common")

            # Section container
            section_id = section["name"]
            s = DIV(_class="section-container", _id=section_id)

            if not common:
                # Label
                label = section["label"]
                translate = section.get("translate", True)
                if isinstance(label, basestring) and translate:
                    self.label = current.T(label)
                else:
                    self.label = label

                # Add tab
                # NOTE(review): the tab link uses the raw `label` while the
                # translated value is stored in self.label — confirm whether
                # the tab should show the translated label instead.
                tablist.append(LI(A(label, _href="#%s" % section_id)))

            # Only common sections and the active tab render synchronously
            if common or active_tab == tab_idx:
                visible = True
            else:
                visible = False

            # Widgets
            widgets = section.get("widgets", [])
            for widget in widgets:

                # Widget ID
                widget_id = "summary-%s" % widget_idx

                # Make sure widgets include the widget ID when
                # generating Ajax URLs:
                r.get_vars["w"] = r.vars["w"] = widget_id

                # Append to filter targets
                filterable = widget.get("filterable", True)
                if filterable:
                    targets.append(widget_id)
                    if not visible and widget.get("ajax_init"):
                        pending.append(widget_id)

                # Apply method: either a callable widget method or a
                # registered widget handler looked up by name
                method = widget.get("method")
                if callable(method):
                    content = method(r,
                                     widget_id=widget_id,
                                     visible=visible,
                                     **attr)
                else:
                    handler = r.get_widget_handler(method)
                    if handler is not None:
                        content = handler(r,
                                          method=method,
                                          widget_id=widget_id,
                                          visible=visible,
                                          **attr)
                    else:
                        r.error(405, r.ERROR.BAD_METHOD)

                # Add content to section: dict results carry extra view
                # variables with the actual markup under the "widget" key
                if isinstance(content, dict):
                    for k, v in content.items():
                        if k not in ("tabs", "sections", "widget"):
                            output[k] = v
                    content = content.get("widget", "EMPTY")
                elif active_tab == tab_idx and isinstance(content, MAP):
                    # Remember the map on the active tab for the JS callback
                    active_map = content
                s.append(DIV(content,
                             _id="%s-container" % widget_id,
                             _class="widget-container"))
                widget_idx += 1

            if common:
                commons.append(s)
            else:
                sections.append(s)
                tab_idx += 1

        # Remove widget ID
        r.get_vars.pop("w", None)

        # Add tabs + sections to output
        if len(sections) > 1:
            output["tabs"] = tablist
            # Hide tabbed sections initially to avoid visible artifacts
            # in slow page loads (S3.search.summary_tabs will un-hide the active one):
            for s in sections:
                s.add_class("hide")
        else:
            # Hide tabs if there's only one section (but then don't hide
            # the section!)
            output["tabs"] = ""
        output["sections"] = sections

        # Add common sections to output
        output["common"] = commons

        # Filter targets (space-separated widget ids the filter form updates)
        target = " ".join(targets)

        # Filter form
        filter_ajax = True
        form_id = "summary-filter-form"
        filter_widgets = get_config("filter_widgets")
        if filter_widgets and not self.hide_filter:

            # Where to retrieve filtered data from:
            if active_tab != 0:
                submit_url_vars = {"t": active_tab}
            else:
                submit_url_vars = {}
            filter_submit_url = attr.get("filter_submit_url")
            if not filter_submit_url:
                _vars = self._remove_filters(r.get_vars)
                _vars.update(submit_url_vars)
                filter_submit_url = r.url(vars=_vars)

            # Where to retrieve updated filter options from:
            filter_ajax_url = attr.get("filter_ajax_url",
                                       r.url(method="filter",
                                             vars={},
                                             representation="options"))

            filter_formstyle = get_config("filter_formstyle")
            filter_submit = get_config("filter_submit", True)
            filter_form = S3FilterForm(filter_widgets,
                                       formstyle=filter_formstyle,
                                       submit=filter_submit,
                                       ajax=filter_ajax,
                                       url=filter_submit_url,
                                       ajaxurl=filter_ajax_url,
                                       _class="filter-form",
                                       _id=form_id)
            fresource = current.s3db.resource(resource.tablename)
            alias = resource.alias if r.component else None
            output["filter_form"] = filter_form.html(fresource,
                                                     r.get_vars,
                                                     target=target,
                                                     alias=alias)
        else:
            # Render as empty string to avoid the exception in the view
            output["filter_form"] = ""

        # View
        response = current.response
        response.view = self._view(r, "summary.html")

        if len(sections) > 1:
            # Provide a comma-separated list of initially hidden widgets
            # which are rendered empty and need a trigger to Ajax-load
            # their data layer (e.g. maps, reports):
            pending = ",".join(pending) if pending else "null"

            # Render the Sections as Tabs
            script = '''S3.search.summary_tabs("%s",%s,"%s")''' % \
                     (form_id, active_tab, pending)
            response.s3.jquery_ready.append(script)
            if active_map:
                # If there is a map on the active tab then we need to add
                # a callback to the Map JS Loader
                active_map.callback = '''S3.search.summary_maps("%s")''' % form_id

        return output

    # -------------------------------------------------------------------------
    def ajax(self, r, **attr):
        """
            Render a specific widget for pulling-in via AJAX

            @param r: the S3Request
            @param attr: controller attributes
        """

        # Get Summary Page Configuration
        config = self._get_config(self.resource)

        # Walk all widgets in order until we hit the one whose sequential id
        # matches the requested ?w=summary-<i>
        widget_id = r.get_vars.get("w")
        i = 0
        for section in config:
            widgets = section.get("widgets", [])
            for widget in widgets:
                if widget_id == "summary-%s" % i:
                    method = widget.get("method", None)
                    output = None
                    if callable(method):
                        output = method(r, widget_id=widget_id, **attr)
                    else:
                        handler = r.get_widget_handler(method)
                        if handler is not None:
                            output = handler(r,
                                             method=method,
                                             widget_id=widget_id,
                                             **attr)
                        else:
                            r.error(405, r.ERROR.BAD_METHOD)
                    return output
                i += 1

        # Not found?
        return None

    # -------------------------------------------------------------------------
    @staticmethod
    def _get_config(resource):
        """
            Get the summary page configuration

            @param resource: the target S3Resource
        """

        get_config = resource.get_config
        config = get_config("summary",
                            current.deployment_settings.get_ui_summary())
        if not config:
            # Fall back to a single plain datatable section
            config = [{"name": "table",
                       "label": "Table",
                       "widgets": [{"name": "datatable",
                                    "method": "datatable",
                                    }]
                       }]
        return config
# END =========================================================================
| 36.297546
| 86
| 0.470971
|
acff3375f07451b8c4ec82e5b94807cfd9638031
| 263
|
py
|
Python
|
toontown/uberdog/DistributedPartyManagerUD.py
|
TheFamiliarScoot/open-toontown
|
678313033174ea7d08e5c2823bd7b473701ff547
|
[
"BSD-3-Clause"
] | 99
|
2019-11-02T22:25:00.000Z
|
2022-02-03T03:48:00.000Z
|
toontown/uberdog/DistributedPartyManagerUD.py
|
TheFamiliarScoot/open-toontown
|
678313033174ea7d08e5c2823bd7b473701ff547
|
[
"BSD-3-Clause"
] | 42
|
2019-11-03T05:31:08.000Z
|
2022-03-16T22:50:32.000Z
|
toontown/uberdog/DistributedPartyManagerUD.py
|
TheFamiliarScoot/open-toontown
|
678313033174ea7d08e5c2823bd7b473701ff547
|
[
"BSD-3-Clause"
] | 57
|
2019-11-03T07:47:37.000Z
|
2022-03-22T00:41:49.000Z
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectUD import DistributedObjectUD
class DistributedPartyManagerUD(DistributedObjectUD):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPartyManagerUD')
| 43.833333
| 85
| 0.882129
|
acff34c9973ef3f850f3d795d0395948780e83c5
| 4,534
|
py
|
Python
|
handlers/main.py
|
gitdaniel228/realtor
|
4366d57b064be87b31c8a036b3ed7a99b2036461
|
[
"BSD-3-Clause"
] | null | null | null |
handlers/main.py
|
gitdaniel228/realtor
|
4366d57b064be87b31c8a036b3ed7a99b2036461
|
[
"BSD-3-Clause"
] | null | null | null |
handlers/main.py
|
gitdaniel228/realtor
|
4366d57b064be87b31c8a036b3ed7a99b2036461
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import os.path
import wsgiref.handlers
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.db import djangoforms
from models import Listing
from google.appengine.ext import db
import logging
from urllib import quote
from google.appengine.api import urlfetch
import geocoder
import datetime
import appengine_admin
from handlers import restful
from google.appengine.api import users
from google.appengine.api import oauth
from handlers import site, api
import locale
import sys
sys.path.append(".")
# Register Django's humanize filters and the project's own template helpers
# so they are available in every rendered template.
template.register_template_library(
    'django.contrib.humanize.templatetags.humanize')
template.register_template_library('templatelib')
def get_current_user():
    """Return the signed-in user, falling back to OAuth credentials.

    Tries the App Engine Users API first; if no user is signed in, falls
    back to the OAuth API. Returns None when neither yields a user.
    """
    user = users.get_current_user()
    logging.info("User (users): %s", user)
    if not user:
        # BUG FIX: the original logged `user` (always None on this branch)
        # and discarded the OAuth result entirely — use it as the fallback.
        user = oauth.get_current_user()
        logging.info("User (oauth): %s", user)
    return user
class NotFoundHandler(restful.Controller):
    """Renders the 404 template for unknown URLs."""
    def get(self):
        logging.debug("NotFoundHandler#get")
        template_data = {}
        self.render(template_data, '404.html')
class UnauthorizedHandler(webapp.RequestHandler):
    """Responds with a bare HTTP 403 for unauthorized requests."""
    def get(self):
        logging.debug("UnauthorizedHandler#get")
        self.error(403)
def make_static_handler(template_file):
    """Creates a webapp.RequestHandler type that renders the given template
    to the response stream."""
    class StaticHandler(webapp.RequestHandler):
        # Each GET renders the template relative to this module's directory,
        # injecting the current (Users-API or OAuth) user for the page header.
        def get(self):
            self.response.out.write(template.render(
                os.path.join(os.path.dirname(__file__), template_file),
                {'current_user': get_current_user()}))
    return StaticHandler
class ListingForm(djangoforms.ModelForm):
    """Auto-generated edit form for Listing; server-managed and geo-derived
    fields are excluded from user editing."""
    class Meta:
        model = Listing
        exclude = ['added_by','location_geocells', 'location']
    """
    def save(self):
        if not self.key():
            self.createDate = datetime.date.today()
        self.updated = datetime.datetime.today()
        super(Listing, self).save()
    """
class ListingDetailsPage(webapp.RequestHandler):
    """Renders the details page for a single listing, looked up by ?id=<int>.

    NOTE(review): a missing or non-numeric `id` raises uncaught — confirm
    whether a 404 should be returned instead.
    """
    def get (self):
        template_file = '../templates/listing.html'
        id = int(self.request.get('id'))
        # Datastore key built from the numeric id in the query string
        listing = Listing.get(db.Key.from_path('Listing', id))
        self.response.out.write(template.render(
            os.path.join(os.path.dirname(__file__), template_file),
            {'current_user': get_current_user(), 'listing' : listing}))
class AdminListing(appengine_admin.ModelAdmin):
    """Admin-site configuration for the Listing model: list view columns,
    editable fields, and read-only audit fields."""
    model = Listing
    listFields = ('address', 'price', 'createDate', 'lastUpdate')
    # BUG FIX: 'listingAgentPhone' appeared twice in the original tuple.
    editFields = ('address', 'price', 'baths', 'beds', 'size', 'description', 'propertyType', 'amenities', 'portfolio', 'listingAgentName', 'listingAgentPhone', 'listingAgentCompany', 'listingAgentEmail')
    readonlyFields = ('createDate', 'lastUpdate', 'author')

# Register to admin site
appengine_admin.register(AdminListing)
def main():
    """Build the WSGI application with all URL routes and serve it via CGI.

    Debug mode is enabled only on the local development server.
    """
    application = webapp.WSGIApplication([
        ('/', make_static_handler('../templates/index.html')),
        ('/listing', ListingDetailsPage),
        (r'^(/ui)(.*)$', appengine_admin.Admin),
        #API
        ('/403.html', site.UnauthorizedHandler),
        ('/404.html', site.NotFoundHandler),
        (r'/api/(.+)/services', api.ServicesListHandler),
        (r'/api/(.+)/services/(.+)/events', api.EventsListHandler),
        (r'/api/(.+)/services/(.+)/events/current', api.CurrentEventHandler),
        (r'/api/(.+)/services/(.+)/events/(.+)', api.EventInstanceHandler),
        (r'/api/(.+)/services/(.+)', api.ServiceInstanceHandler),
        (r'/api/(.+)/statuses', api.StatusesListHandler),
        (r'/api/(.+)/statuses/(.+)', api.StatusInstanceHandler),
        (r'/api/(.+)/status-images', api.ImagesListHandler),
        (r'/api/(.+)/levels', api.LevelsListHandler),
        # NOTE(review): the '/api/' route below is registered twice — the
        # second entry is unreachable and is probably a copy-paste leftover.
        (r'/api/', api.NotFoundHandler),
        (r'/api/', api.NotFoundHandler),
        #SITE
        #(r'/services/(.+)/(.+)/(.+)/(.+)', serviceHandler),
        #(r'/services/(.+)/(.+)/(.+)', serviceHandler),
        #(r'/services/(.+)/(.+)', serviceHandler),
        #(r'/services/(.+)', serviceHandler),
        (r'/documentation/credentials', site.ProfileHandler),
        (r'/documentation/verify', site.VerifyAccessHandler),
        (r'/documentation/(.+)', site.DocumentationHandler),
        ],
        debug=('Development' in os.environ['SERVER_SOFTWARE']))
    wsgiref.handlers.CGIHandler().run(application)

if __name__ == '__main__':
    main()
| 32.618705
| 225
| 0.650199
|
acff378409fcceab102494d9abc7bef394db5eab
| 2,301
|
py
|
Python
|
openstack/network/v2/metering_label.py
|
wangrui1121/huaweicloud-sdk-python
|
240abe00288760115d1791012d4e3c4592d77ad1
|
[
"Apache-2.0"
] | 43
|
2018-12-19T08:39:15.000Z
|
2021-07-21T02:45:43.000Z
|
openstack/network/v2/metering_label.py
|
wangrui1121/huaweicloud-sdk-python
|
240abe00288760115d1791012d4e3c4592d77ad1
|
[
"Apache-2.0"
] | 11
|
2019-03-17T13:28:56.000Z
|
2020-09-23T23:57:50.000Z
|
openstack/network/v2/metering_label.py
|
wangrui1121/huaweicloud-sdk-python
|
240abe00288760115d1791012d4e3c4592d77ad1
|
[
"Apache-2.0"
] | 47
|
2018-12-19T05:14:25.000Z
|
2022-03-19T15:28:30.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Huawei has modified this source file.
#
# Copyright 2018 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from openstack.network import network_service
from openstack import resource2 as resource
class MeteringLabel(resource.Resource):
    """Neutron metering label resource.

    Maps create/get/update/delete/list operations onto the
    /metering/metering-labels endpoint of the network service.
    """
    resource_key = 'metering_label'
    resources_key = 'metering_labels'
    base_path = '/metering/metering-labels'
    service = network_service.NetworkService()

    # capabilities
    allow_create = True
    allow_get = True
    allow_update = True
    allow_delete = True
    allow_list = True

    # Query filters; the server-side names are 'shared' and 'tenant_id'
    _query_mapping = resource.QueryParameters(
        'description', 'name',
        is_shared='shared',
        project_id='tenant_id'
    )

    # Properties
    #: Description of the metering label.
    description = resource.Body('description')
    #: Name of the metering label.
    name = resource.Body('name')
    #: The ID of the project this metering label is associated with.
    project_id = resource.Body('tenant_id')
    #: Indicates whether this label is shared across all tenants.
    #: *Type: bool*
    is_shared = resource.Body('shared', type=bool)
| 37.112903
| 87
| 0.692742
|
acff383471e40ccb2d9232aa408cb89cb4f06d22
| 4,399
|
py
|
Python
|
jina/executors/indexers/vector/faiss.py
|
rohan1chaudhari/jina
|
5ef04a4c982f0aa967bfc10b443c3f397c5e790f
|
[
"Apache-2.0"
] | null | null | null |
jina/executors/indexers/vector/faiss.py
|
rohan1chaudhari/jina
|
5ef04a4c982f0aa967bfc10b443c3f397c5e790f
|
[
"Apache-2.0"
] | null | null | null |
jina/executors/indexers/vector/faiss.py
|
rohan1chaudhari/jina
|
5ef04a4c982f0aa967bfc10b443c3f397c5e790f
|
[
"Apache-2.0"
] | null | null | null |
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Tuple
import numpy as np
from . import BaseNumpyIndexer
from ...devices import FaissDevice
class FaissIndexer(FaissDevice, BaseNumpyIndexer):
    """Faiss powered vector indexer

    For more information about the Faiss supported parameters and installation problems, please consult:
        - https://github.com/facebookresearch/faiss

    .. note::
        Faiss package dependency is only required at the query time.
    """

    def __init__(self, index_key: str, train_filepath: str = None,
                 distance: str = 'l2', nprobe: int = 1, *args, **kwargs):
        """
        Initialize an Faiss Indexer

        :param index_key: index type supported by ``faiss.index_factory``
        :param train_filepath: the training data file path, e.g ``faiss.tgz`` or `faiss.npy`. The data file is expected
            to be either `.npy` file from `numpy.save()` or a `.tgz` file from `NumpyIndexer`.
        :param distance: 'l2' or 'inner_product' accepted. Determines which distances to optimize by FAISS
        :param nprobe: Number of clusters to consider at search time.

        .. highlight:: python
        .. code-block:: python
            # generate a training file in `.tgz`
            import gzip
            import numpy as np
            from jina.executors.indexers.vector.faiss import FaissIndexer

            train_filepath = 'faiss_train.tgz'
            train_data = np.random.rand(10000, 128)
            with gzip.open(train_filepath, 'wb', compresslevel=1) as f:
                f.write(train_data.astype('float32'))
            indexer = FaissIndexer('PCA64,FLAT', train_filepath)

            # generate a training file in `.npy`
            train_filepath = 'faiss_train'
            np.save(train_filepath, train_data)
            indexer = FaissIndexer('PCA64,FLAT', train_filepath)
        """
        super().__init__(*args, **kwargs)
        self.index_key = index_key
        self.train_filepath = train_filepath
        self.distance = distance
        self.nprobe = nprobe

    def build_advanced_index(self, vecs: 'np.ndarray'):
        """Load all vectors (in numpy ndarray) into Faiss indexers """
        # faiss is imported lazily so the package is only required at query time
        import faiss
        metric = faiss.METRIC_L2
        if self.distance == 'inner_product':
            metric = faiss.METRIC_INNER_PRODUCT
        if self.distance not in {'inner_product', 'l2'}:
            self.logger.warning('Invalid distance metric for Faiss index construction. Defaulting to l2 distance')
        self._index = self.to_device(index=faiss.index_factory(self.num_dim, self.index_key, metric))
        if not self.is_trained:
            # index types like IVF/PCA need a training pass before adding vectors
            _train_data = self._load_training_data(self.train_filepath)
            if _train_data is None:
                self.logger.warning('loading training data failed.')
                return None
            self.train(_train_data)
        self._index.add(vecs.astype('float32'))
        self._index.nprobe = self.nprobe
        return self._index

    def query(self, keys: 'np.ndarray', top_k: int, *args, **kwargs) -> Tuple['np.ndarray', 'np.ndarray']:
        """Search the top_k nearest vectors for each key; returns (keys, distances)
        with Faiss-internal ids mapped back to external document keys."""
        dist, ids = self.query_handler.search(keys, top_k)
        return self.int2ext_key[ids], dist

    def train(self, data: 'np.ndarray', *args, **kwargs):
        """Train the underlying Faiss index on `data`; fixes num_dim on first use.

        :raises ValueError: when `data` has a different feature count than the index
        """
        _num_samples, _num_dim = data.shape
        if not self.num_dim:
            self.num_dim = _num_dim
        if self.num_dim != _num_dim:
            raise ValueError('training data should have the same number of features as the index, {} != {}'.format(
                self.num_dim, _num_dim))
        self._index.train(data)

    def _load_training_data(self, train_filepath):
        """Load training vectors, first as a gzip archive, then as `.npy`;
        returns None when both attempts fail."""
        result = None
        try:
            result = self._load_gzip(train_filepath)
            if result is not None:
                return result
        except OSError as e:
            # not gzip-compressed — fall through to the numpy loader
            self.logger.info('not a gzippped file, {}'.format(e))

        try:
            result = np.load(train_filepath)
            if isinstance(result, np.lib.npyio.NpzFile):
                self.logger.warning('.npz format is not supported. Please save the array in .npy format.')
                result = None
        except Exception as e:
            self.logger.error('loading training data failed, filepath={}, {}'.format(train_filepath, e))
        return result
| 40.731481
| 119
| 0.627415
|
acff38ce204373a0dc4c67bb2da5f9e606834dc7
| 1,218
|
py
|
Python
|
nadine/management/commands/checkin_anniversary.py
|
czue/nadine
|
61fbfcac4d0c3159aa73500e47f4fa23c0aa9ef0
|
[
"Apache-2.0"
] | 1
|
2019-08-15T00:10:38.000Z
|
2019-08-15T00:10:38.000Z
|
nadine/management/commands/checkin_anniversary.py
|
czue/nadine
|
61fbfcac4d0c3159aa73500e47f4fa23c0aa9ef0
|
[
"Apache-2.0"
] | null | null | null |
nadine/management/commands/checkin_anniversary.py
|
czue/nadine
|
61fbfcac4d0c3159aa73500e47f4fa23c0aa9ef0
|
[
"Apache-2.0"
] | null | null | null |
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
from django.utils.timezone import localtime, now
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from nadine import email
class Command(BaseCommand):
    """Management command that emails every member on their join anniversary."""
    help = "Check-in with users on their anniversary"

    def handle(self, *args, **options):
        for u in User.helper.active_members():
            d = u.profile.duration()
            # An exact whole-year duration (no extra months/days) means
            # today is this member's join anniversary.
            if d.years and not d.months and not d.days:
                email.announce_anniversary(u)
                email.send_edit_profile(u)
# Copyright 2018 Office Nomads LLC (http://officenomads.com/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| 52.956522
| 579
| 0.751232
|
acff3a4664c55df057248f0f980c054886cf377c
| 157
|
py
|
Python
|
tcod2/exceptions.py
|
Javascript-void0/Pygame
|
96b4cc9b414a40e684a833306dcf5d191a4f4aa5
|
[
"MIT"
] | 1
|
2021-04-09T01:36:32.000Z
|
2021-04-09T01:36:32.000Z
|
tcod2/exceptions.py
|
Javascript-void0/Pygame
|
96b4cc9b414a40e684a833306dcf5d191a4f4aa5
|
[
"MIT"
] | null | null | null |
tcod2/exceptions.py
|
Javascript-void0/Pygame
|
96b4cc9b414a40e684a833306dcf5d191a4f4aa5
|
[
"MIT"
] | null | null | null |
class Impossible(Exception):
    """Raised when an action cannot be performed; the message explains why."""
| 31.4
| 69
| 0.719745
|
acff3aa84728f708d10004b4d1405a14fe1dea3c
| 2,309
|
py
|
Python
|
legacy/miscellaneous/tdf.py
|
solar464/TDF_deterministic_encryption
|
ff9dceacb37ce7727a8205cc72a4d928d37cce6f
|
[
"MIT"
] | null | null | null |
legacy/miscellaneous/tdf.py
|
solar464/TDF_deterministic_encryption
|
ff9dceacb37ce7727a8205cc72a4d928d37cce6f
|
[
"MIT"
] | null | null | null |
legacy/miscellaneous/tdf.py
|
solar464/TDF_deterministic_encryption
|
ff9dceacb37ce7727a8205cc72a4d928d37cce6f
|
[
"MIT"
] | null | null | null |
import owfe
from floodberry.floodberry_ed25519 import GE25519 as GE
from random import randint
"""
Incomplete implementation of Strong TDF (from smooth recyclable OWFE)
Section 4 of https://eprint.iacr.org/2018/872.pdf
"""
class IndexKey:
    """Public index key ik = (pp, a, CT) of the TDF (Section 4 of
    https://eprint.iacr.org/2018/872.pdf).

    pp is the OWFE public parameter, a the random bit vector, and CT the
    per-position pairs of OWFE ciphertexts; all three must have length m.
    """

    def __init__(self, pp, a, CT):
        # BUG FIX: the original used `assert cond: msg` (invalid syntax);
        # the assert message is attached with a comma.
        assert len(pp) == len(CT) and len(pp) == len(a), "n values inconsistent"
        # BUG FIX: the original assigned to the same names as its read-only
        # properties, which fails (property has no setter) — store the
        # values under private names instead.
        self._pp = pp
        self._CT = CT
        self._a = a
        self._m = len(pp)

    @property
    def pp(self):
        return self._pp

    @property
    def a(self):
        return self._a

    @property
    def CT(self):
        return self._CT

    @property
    def m(self):
        return self._m
class TrapdoorKey:
    """Trapdoor key tk = (pp, a, P) matching an IndexKey (see KG)."""

    def __init__(self, pp, a, P):
        # BUG FIX: the original signature was (pp, PP) yet the body read
        # undefined names `a` and `P`; KG() calls TrapdoorKey(pp, a, P),
        # so that is the intended interface. Values are stored under
        # private names so the read-only properties below work.
        self._pp = pp
        self._a = a
        self._P = P
        self._m = len(pp)

    @property
    def pp(self):
        return self._pp

    @property
    def a(self):
        return self._a

    @property
    def P(self):
        return self._P

    @property
    def m(self):
        return self._m
def Mir(pp, x, CT, a):
    """Decrypt the recyclable ciphertexts for input x and mask with a.

    Returns a list M of n bit pairs: position i carries the decrypted bit b
    in the slot selected by bit i of x, and a[i] ^ b in the other slot.
    NOTE(review): relies on helpers `str_to_bin_list` and `D` (OWFE
    decryption) that are defined elsewhere in the project.
    """
    bin_x = str_to_bin_list(x)
    n = len(a)
    # BUG FIX: the original line had mismatched parentheses: `range(n))]`.
    M = [None for _ in range(n)]
    for i in range(n):
        b = D(pp, CT[i][bin_x[i]], x)
        if bin_x[i]:
            M[i] = (a[i] ^ b, b)
        else:
            M[i] = (b, a[i] ^ b)
    return M
def RSum(M):
    """XOR-reduce each pair in M, giving the bit vector that F_inv compares
    against the trapdoor key's `a`.

    BUG FIX: the original reduced over an undefined name, returned nothing,
    and used `reduce` without importing it (Python 3 moved it to functools).
    """
    from functools import reduce  # local import: module header lacks it
    return [reduce(lambda i, j: i ^ j, v) for v in M]
def KG(m: int, lmbd: int = 254):
    """Generate a TDF key pair (index key, trapdoor key).

    :param m: input length of the TDF.
    :param lmbd: security parameter (bit length of the random pads).
    :returns: (IndexKey, TrapdoorKey) sharing the same pp and bit vector a.

    BUG FIX: the original called `randint(limit, size=...)` — a numpy-style
    signature that `random.randint` (imported at the top of this module)
    does not have. The pads are drawn with stdlib randint instead.
    """
    limit = (1 << lmbd)
    pp = owfe.K(m, lmbd)
    # P[j] is a pair of random pads, one per possible input bit at position j
    P = [(randint(0, limit - 1), randint(0, limit - 1)) for _ in range(m)]
    CT = [(owfe.E1(pp, j, 0, P[j][0]), owfe.E1(pp, j, 1, P[j][1])) for j in range(m)]
    a = [randint(0, 1) for _ in range(m)]
    return IndexKey(pp, a, CT), TrapdoorKey(pp, a, P)
def F(ik: IndexKey, x):
    """Evaluate the TDF at x under index key ik.

    Returns the OWFE image of x together with the masked decryption matrix.
    BUG FIX: the original read a bare `pp`, which is not defined at module
    scope — the public parameter lives on the index key.
    """
    z = x  # TODO: change z to an erasure-resilient encoding of x
    return (owfe.f(ik.pp, z), Mir(ik.pp, z, ik.CT, ik.a))
def F_inv(tk: TrapdoorKey, y: GE, M: list):
    """Invert the TDF image (y, M) using the trapdoor key.

    Recovers each input bit by checking which of the two recomputed pads
    matches the masked pair in M; unresolvable positions stay None.
    BUG FIX: the original used invalid `assert cond: msg` syntax.
    """
    assert RSum(M) == tk.a, "F_inv: RSum(M) != a"
    z_prime = [None for _ in range(tk.m)]
    for i in range(tk.m):
        b0 = owfe.E2(tk.pp, y, i, 0, tk.P[i][0])
        b1 = owfe.E2(tk.pp, y, i, 1, tk.P[i][1])
        if M[i] == [b0, not b1]:
            z_prime[i] = 1
        elif M[i] == [not b0, b1]:
            z_prime[i] = 0
    x = z_prime  # TODO: change x to an erasure-resilient decoding of z_prime
    # NOTE(review): the original re-verification `F(ik, x) == u` referenced
    # names (ik, u) that are not in scope here and would always raise
    # NameError; a correct check needs the index key and the original image
    # passed in. Left out pending redesign of the signature.
    return x
| 23.323232
| 80
| 0.525769
|
acff3aef5cc358ba6d33f974db3791a48006f350
| 3,810
|
py
|
Python
|
WoBert/data_helper_tnews.py
|
shawroad/Text-Classification-Pytorch
|
831560be9c4d45660dec85338a8e6e0a936c86ac
|
[
"Apache-2.0"
] | 4
|
2022-03-08T07:41:57.000Z
|
2022-03-20T11:41:16.000Z
|
WoBert/data_helper_tnews.py
|
shawroad/Text-Classification-Pytorch
|
831560be9c4d45660dec85338a8e6e0a936c86ac
|
[
"Apache-2.0"
] | null | null | null |
WoBert/data_helper_tnews.py
|
shawroad/Text-Classification-Pytorch
|
831560be9c4d45660dec85338a8e6e0a936c86ac
|
[
"Apache-2.0"
] | 1
|
2022-03-09T08:05:15.000Z
|
2022-03-09T08:05:15.000Z
|
"""
@file : data_helper_tnews.py
@author : xiaolu
@email : luxiaonlp@163.com
@time : 2022-02-28
"""
import re
import torch
import json
import pandas as pd
from torch.utils.data import Dataset
def clean_text(text):
    """Normalize a piece of text: drop URLs, strip characters outside the
    allowed set ([]@# alphanumerics and CJK), and collapse whitespace."""
    url_pattern = re.compile(
        '(https?://)?(www\\.)?[-a-zA-Z0-9@:%._+~#=]{1,256}\\.[a-zA-Z0-9()]{1,6}\\b([-a-zA-Z0-9()@:%_+.~#?&/=]*)'
    )
    legal_pattern = re.compile('[^\\[\\]@#a-zA-Z0-9\u4e00-\u9fa5]')
    space_pattern = re.compile('\\s+')
    # Literal "\n" sequences and real newlines both become spaces.
    cleaned = str(text).replace('\\n', ' ').replace('\n', ' ').strip()
    for pattern in (url_pattern, legal_pattern, space_pattern):
        cleaned = pattern.sub(' ', cleaned)
    return cleaned.strip()
def load_data(path, label2id):
    """Read a jsonl file of {'label_desc', 'sentence'} records into a
    DataFrame with integer 'label' and cleaned 'text' columns."""
    labels, texts = [], []
    with open(path, 'r', encoding='utf8') as f:
        for raw_line in f.readlines():
            record = json.loads(raw_line.strip())
            # e.g. 'label_desc': 'news_edu', 'sentence': the raw headline text
            labels.append(label2id[record['label_desc']])
            texts.append(record['sentence'])

    df = pd.DataFrame({'label': labels, 'text': texts})
    df['text'] = df['text'].astype(str)
    df.dropna(subset=['label', 'text'], inplace=True)
    df.reset_index(drop=True, inplace=True)  # re-index after the dropna
    df.loc[:, 'text'] = df['text'].map(clean_text)  # scrub each headline
    return df
class CustomDataset(Dataset):
    """Torch dataset over a DataFrame with 'text' and 'label' columns.

    Tokenization is performed lazily per item in __getitem__ using the
    provided tokenizer (expected to offer an encode_plus method).
    """

    def __init__(self, dataframe, tokenizer):
        self.data = dataframe
        self.tokenizer = tokenizer
        self.text = dataframe.text
        self.label = dataframe.label

    def __len__(self):
        return len(self.text)

    def __getitem__(self, index):
        encoded = self.tokenizer.encode_plus(
            text=self.text[index],
            text_pair=None,
            add_special_tokens=True,
            return_token_type_ids=True
        )
        return {
            'input_ids': encoded['input_ids'],
            'attention_mask': encoded['attention_mask'],
            'token_type_ids': encoded["token_type_ids"],
            'label': self.label[index]
        }
def pad_to_maxlen(input_ids, max_len, pad_value=0):
    """Truncate or right-pad *input_ids* so the result has exactly *max_len* items."""
    if len(input_ids) < max_len:
        # Right-pad with pad_value up to the target length.
        return input_ids + [pad_value] * (max_len - len(input_ids))
    # Already long enough: keep only the first max_len items.
    return input_ids[:max_len]
def collate_fn(batch):
    """Pad a list of samples to the batch's max sequence length and tensorize.

    Dynamic padding: the target length is the longest 'input_ids' in this
    batch, capped at 512. Returns (input_ids, attention_mask, token_type_ids,
    labels) as long tensors.
    """
    max_len = min(max(len(sample['input_ids']) for sample in batch), 512)
    input_ids, attention_mask, token_type_ids, labels = [], [], [], []
    for sample in batch:
        input_ids.append(pad_to_maxlen(sample['input_ids'], max_len=max_len))
        attention_mask.append(pad_to_maxlen(sample['attention_mask'], max_len=max_len))
        token_type_ids.append(pad_to_maxlen(sample['token_type_ids'], max_len=max_len))
        labels.append(sample['label'])
    return (
        torch.tensor(input_ids, dtype=torch.long),
        torch.tensor(attention_mask, dtype=torch.long),
        torch.tensor(token_type_ids, dtype=torch.long),
        torch.tensor(labels, dtype=torch.long),
    )
if __name__ == '__main__':
    # Smoke test: build the label mapping, then load the train and dev splits.
    # Paths are relative to this script's directory layout — TODO confirm when run elsewhere.
    label2id = json.load(open('../data/tnews_public/label2id.json', 'r', encoding='utf8'))
    train_data_path = '../data/tnews_public/train.json'
    train_df = load_data(train_data_path, label2id)
    dev_data_path = '../data/tnews_public/dev.json'
    dev_df = load_data(dev_data_path, label2id)
| 32.016807
| 112
| 0.627297
|
acff3b44d7a56fd4b82c8a75f1d512ed83742fd6
| 1,200
|
py
|
Python
|
google/ads/googleads/v5/enums/types/price_extension_price_qualifier.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v5/enums/types/price_extension_price_qualifier.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v5/enums/types/price_extension_price_qualifier.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# proto-plus module descriptor: declares the package/marshal namespace and
# the manifest of message names this generated module exports.
__protobuf__ = proto.module(
    package="google.ads.googleads.v5.enums",
    marshal="google.ads.googleads.v5",
    manifest={"PriceExtensionPriceQualifierEnum",},
)
# NOTE: generated (gapic) code — comments only; do not hand-edit the logic.
class PriceExtensionPriceQualifierEnum(proto.Message):
    r"""Container for enum describing a price extension price
    qualifier.
    """

    class PriceExtensionPriceQualifier(proto.Enum):
        r"""Enums of price extension price qualifier."""
        UNSPECIFIED = 0  # Default/unset value.
        UNKNOWN = 1  # Value not known in this API version.
        FROM = 2
        UP_TO = 3
        AVERAGE = 4
# Public API of this module, derived from the proto manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
| 27.906977
| 74
| 0.709167
|
acff3c4b5a094cb75a6fbdd89f0b864357b545e4
| 599
|
py
|
Python
|
Leetcode/478. Generate Random Point in a Circle/solution2.py
|
asanoviskhak/Outtalent
|
c500e8ad498f76d57eb87a9776a04af7bdda913d
|
[
"MIT"
] | 51
|
2020-07-12T21:27:47.000Z
|
2022-02-11T19:25:36.000Z
|
Leetcode/478. Generate Random Point in a Circle/solution2.py
|
CrazySquirrel/Outtalent
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
[
"MIT"
] | null | null | null |
Leetcode/478. Generate Random Point in a Circle/solution2.py
|
CrazySquirrel/Outtalent
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
[
"MIT"
] | 32
|
2020-07-27T13:54:24.000Z
|
2021-12-25T18:12:50.000Z
|
from random import random
class Solution:
    """Uniform random point sampling inside a circle via rejection sampling."""

    def __init__(self, radius: float, x_center: float, y_center: float):
        self.radius = radius
        self.xc = x_center
        self.yc = y_center

    # Fix: the original annotated the return as List[float] without importing
    # typing.List, which raises NameError when the class body is executed.
    # The builtin `list` is always available and keeps the same meaning.
    def randPoint(self) -> list:
        """Return [x, y] uniformly distributed over the circle's area.

        Rejection sampling: draw points uniformly in the square
        [-1, 1] x [-1, 1] until one falls inside the unit circle
        (acceptance rate pi/4), then scale by radius and translate.
        """
        while True:
            rx = (random() - 0.5) * 2
            ry = (random() - 0.5) * 2
            if ((rx ** 2) + (ry ** 2)) <= 1:
                break
        return [rx * self.radius + self.xc, ry * self.radius + self.yc]
# Your Solution object will be instantiated and called as such:
# obj = Solution(radius, x_center, y_center)
# param_1 = obj.randPoint()
| 28.52381
| 72
| 0.577629
|
acff3ca34b769c52c476c70daba4ca23e6b8f06e
| 681
|
py
|
Python
|
python/dgllife/model/model_zoo/__init__.py
|
Erfaan-Rostami/dgl-lifesci
|
08fc317f634fbaee4a8d074c332e871357845e4f
|
[
"Apache-2.0"
] | 1
|
2020-06-22T19:19:24.000Z
|
2020-06-22T19:19:24.000Z
|
python/dgllife/model/model_zoo/__init__.py
|
chaoyue729/dgl-lifesci
|
b7be8c865b8eaf5e1cac29a3b4dddbf4500bab16
|
[
"Apache-2.0"
] | null | null | null |
python/dgllife/model/model_zoo/__init__.py
|
chaoyue729/dgl-lifesci
|
b7be8c865b8eaf5e1cac29a3b4dddbf4500bab16
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Collection of model architectures
# Molecular property prediction
from .attentivefp_predictor import *
from .gat_predictor import *
from .gcn_predictor import *
from .gin_predictor import *
from .mgcn_predictor import *
from .mlp_predictor import *
from .mpnn_predictor import *
from .schnet_predictor import *
from .weave_predictor import *
# Generative models
from .dgmg import *
from .jtnn import *
# Reaction prediction
from .wln_reaction_center import *
from .wln_reaction_ranking import *
# Protein-Ligand Binding
from .acnn import *
| 23.482759
| 68
| 0.772394
|
acff3d28b2ee7d542dbd6f02d823425d11c32c9e
| 20,363
|
py
|
Python
|
tower_cli/cli/transfer/common.py
|
kedark3/tower-cli
|
487a1b9a8e96509798fee108e4f7d2c187177771
|
[
"Apache-2.0"
] | 363
|
2015-01-14T17:48:34.000Z
|
2022-01-29T06:37:04.000Z
|
tower_cli/cli/transfer/common.py
|
kedark3/tower-cli
|
487a1b9a8e96509798fee108e4f7d2c187177771
|
[
"Apache-2.0"
] | 703
|
2015-01-06T17:17:20.000Z
|
2020-09-16T15:54:17.000Z
|
tower_cli/cli/transfer/common.py
|
kedark3/tower-cli
|
487a1b9a8e96509798fee108e4f7d2c187177771
|
[
"Apache-2.0"
] | 203
|
2015-01-18T22:38:23.000Z
|
2022-01-28T19:19:05.000Z
|
import copy
import tower_cli
from tower_cli.api import client
from tower_cli.utils import debug
from tower_cli.exceptions import TowerCLIError
from tower_cli.resources.role import ACTOR_FIELDS
# Dictionary keys used when tagging exported assets with their type/relation.
ASSET_TYPE_KEY = "asset_type"
ASSET_RELATION_KEY = "asset_relation"
# Order in which asset types are sent — presumably so that dependencies
# (users, orgs, credentials) exist before the assets that reference them.
SEND_ORDER = [
    'user',
    'organization',
    'team',
    'credential_type',
    'credential',
    'notification_template',
    'inventory_script',
    'project',
    'inventory',
    'job_template',
    'workflow'
]
# Placeholder Tower substitutes for secret field values in API responses;
# remove_encrypted_values() blanks these out of exports.
ENCRYPTED_VALUE = '$encrypted$'
# Cache of POST options per asset type, filled lazily by get_api_options().
API_POST_OPTIONS = {
    # The post options of a schedule re buried under the asset type (i.e. job_template) so we are manually adding them
    'schedules': {
        "name": {
            "required": True,
            "max_length": 512,
        },
        "description": {
            "required": False,
            "default": ""
        },
        "unified_job_template": {
            "required": True,
        },
        "enabled": {
            "required": False,
            "default": True
        },
        "rrule": {
            "required": True,
            "max_length": 255
        },
        "extra_data": {
            "required": False,
            "default": {}
        }
    }
}
# Related-endpoint names for the two notification hooks handled on export.
NOTIFICATION_TYPES = ['notification_templates_error', 'notification_templates_success']
def get_api_options(asset_type):
    """Return (and cache) the POST field options for *asset_type*.

    The Tower API's OPTIONS endpoint is queried on first use; results are
    memoized in the module-level API_POST_OPTIONS dict. Returns None when the
    endpoint exposes no POST action (a warning is logged in that case).
    """
    if asset_type in API_POST_OPTIONS:
        return API_POST_OPTIONS[asset_type]

    endpoint = tower_cli.get_resource(asset_type).endpoint
    payload = client.options(endpoint).json()
    actions = payload.get("actions", {})
    if "POST" in actions:
        API_POST_OPTIONS[asset_type] = actions["POST"]
    else:
        debug.log("WARNING: Asset type {} has no API POST options no pre-checks can be performed".format(
            asset_type
        ))
        API_POST_OPTIONS[asset_type] = None
    return API_POST_OPTIONS[asset_type]
def map_node_to_post_options(post_options, source_node, target_node):
    """Copy fields from *source_node* into *target_node*, keeping only values
    that are API-required or differ from the API default.

    *post_options* is the POST options dict from get_api_options(); when it is
    None (the options lookup failed) nothing is copied.
    """
    if post_options is None:
        return
    for field, spec in post_options.items():
        if field not in source_node:
            continue
        value = source_node[field]
        if spec.get('required'):
            # Required fields are always exported.
            target_node[field] = value
            continue
        if value == spec.get('default'):
            # Optional field still at its default: omit it from the export.
            continue
        # work-around AWX bug in 8.0.0 where webhook_service OPTIONS not right
        if field == 'webhook_service' and value == '':
            continue
        target_node[field] = value
# Takes an asset and loops over the defined dependences
# These are things that may be other objects in Tower
# For example, a credential can be tied to an Organization
def resolve_asset_dependencies(an_asset, asset_type):
    """Rewrite related-object id references in *an_asset* to names, in place.

    For every key of an_asset['related'] that also exists as a top-level
    field, fetch the referenced object and replace its numeric id with the
    object's identity field (typically the name).

    Raises TowerCLIError when a referenced object cannot be fetched.
    """
    for relation in an_asset['related']:
        if relation in an_asset:
            # Multiple credentials on things like job templates come through as:
            # vault_credential
            # machine_credential
            # — all of them resolve against the 'credential' resource.
            if relation.endswith("credential"):
                model_type = "credential"
            else:
                model_type = relation
            try:
                expanded_relation = tower_cli.get_resource(model_type).get(an_asset[relation])
            except TowerCLIError as e:
                raise TowerCLIError("Unable to get {} named {}: {}".format(model_type, an_asset[relation], e))
            # NOTE(review): identity is derived from asset_type, not model_type —
            # confirm this is intentional before changing.
            identifier = get_identity(asset_type)
            if identifier in expanded_relation:
                an_asset[relation] = expanded_relation[identifier]
def get_identity(asset_type):
    """Return the identity field name used to reference assets of *asset_type*.

    The identity is the last entry of the resource's ``identity`` tuple.
    """
    # 'schedules' is the one plural asset type; map it to its resource name.
    resource_name = 'schedule' if asset_type == 'schedules' else asset_type
    return tower_cli.get_resource(resource_name).identity[-1]
def remove_encrypted_values(hash_name):
    """Recursively blank out Tower's ENCRYPTED_VALUE placeholders, in place.

    Any value equal to ENCRYPTED_VALUE ('$encrypted$') is replaced with ''
    so the export does not carry unusable secret placeholders.
    """
    for entry in hash_name:
        value = hash_name[entry]
        # Fix: the original compared type() exactly against
        # tower_cli.utils.data_structures.OrderedDict, which skipped plain
        # dicts and any other dict subclass; isinstance covers them all
        # (OrderedDict is a dict subclass, so behavior is a strict superset).
        if isinstance(value, dict):
            remove_encrypted_values(value)
        elif value == ENCRYPTED_VALUE:
            hash_name[entry] = ''
def extract_workflow_nodes(asset):
    """Export a workflow's node graph as a list of portable node dicts.

    Node ids are replaced by generated names ('node0', 'node1', ...) and the
    success/failure/always edge lists are rewritten to reference those names,
    so the graph can be re-imported into another Tower instance.

    Raises TowerCLIError when a node's job template has been deleted or an
    edge references an unknown node id.
    """
    # If workflow_node_post_options is not filled out, get it
    workflow_node_post_options = get_api_options('node')
    # Get the workflow nodes
    query_params = [("workflow_job_template", asset['id'])]
    nodes = tower_cli.get_resource('node').list(**{
        "query": query_params,
        'fail_on_multiple_results': False,
        'all_pages': True
    })
    # We have to temporarily stash these.
    # At the end of the process we need to go through all of the nodes and resolve the different
    # node types from their IDs to their names
    workflow_nodes_extracted = []
    # This is a stash for us to map the IDs back to the labels
    workflow_node_to_name_mapping = {}
    node_number = 0
    for workflow_node in nodes['results']:
        node_name = 'node{}'.format(node_number)
        node_number = node_number + 1
        node_to_add = {
            "name": node_name,
        }
        workflow_node_to_name_mapping[workflow_node['id']] = node_name
        map_node_to_post_options(workflow_node_post_options, workflow_node, node_to_add)
        # We can delete the workflow_job_template since we will be applying it to this workflow
        if 'workflow_job_template' in node_to_add:
            del node_to_add["workflow_job_template"]
        # If the unified job template is missing, we can raise an error for this workflow
        if 'unified_job_template' not in node_to_add:
            raise TowerCLIError(
                "Workflow export exception: workflow {} has a node whose job template has been deleted".format(
                    asset['name']
                )
            )
        # Now we need to resolve the unified job template
        del node_to_add["unified_job_template"]
        node_to_add['unified_job_type'] = workflow_node["summary_fields"]["unified_job_template"]["unified_job_type"]
        node_to_add['unified_job_name'] = workflow_node["summary_fields"]["unified_job_template"]["name"]
        # Per-node credential/inventory overrides are resolved to names.
        if 'credential' in workflow_node and workflow_node['credential']:
            node_to_add['credential'] = tower_cli.get_resource('credential').get(workflow_node['credential'])['name']
        if 'inventory' in workflow_node and workflow_node['inventory']:
            node_to_add['inventory'] = tower_cli.get_resource('inventory').get(workflow_node['inventory'])['name']
        # Finally copy over the different node types
        for node_type in tower_cli.get_resource('workflow').workflow_node_types:
            if node_type in workflow_node:
                node_to_add[node_type] = workflow_node[node_type]
        workflow_nodes_extracted.append(node_to_add)
    # Finally we need to resolve all of the node IDs in the different types
    # NOTE(review): this pass indexes workflow_node[node_type] unconditionally,
    # while the first pass copied the key only if present — a node missing one
    # of the edge lists would raise KeyError here. Confirm the API always
    # returns all edge-type lists before relying on this.
    for workflow_node in workflow_nodes_extracted:
        for node_type in tower_cli.get_resource('workflow').workflow_node_types:
            # Resolved nodes will be the resolved node names instead of IDs
            resolved_nodes = []
            for a_node_id in workflow_node[node_type]:
                # If we found a node that does not resolve raise an exception
                if a_node_id not in workflow_node_to_name_mapping:
                    raise TowerCLIError("Workflow export exception: unable to resolve node {} from {}".format(
                        a_node_id, asset['name'])
                    )
                # Add the new node to the list of resolved node
                resolved_nodes.append(workflow_node_to_name_mapping[a_node_id])
            # Put the resolved nodes back into the object
            workflow_node[node_type] = resolved_nodes
    return workflow_nodes_extracted
def extract_inventory_relations(asset, relation_type):
    """Export inventory sub-resources (hosts, groups, inventory sources, ...).

    Lists all objects of *relation_type* belonging to inventory *asset*,
    reduces them to exportable fields and — for inventory sources — resolves
    referenced projects, scripts and credentials to names and attaches their
    schedules. Objects managed by an inventory source are skipped.

    Returns {'items': [...], 'existing_name_to_id_map': {name: id}}.
    Raises TowerCLIError when the listing or a reference lookup fails.
    """
    # Get the API options for the relation
    post_options = get_api_options(relation_type)
    # Get all of the relations for this inventory
    try:
        relations = tower_cli.get_resource(relation_type).list(all_pages=True, **{'inventory': asset['id']})
    except TowerCLIError as e:
        raise TowerCLIError("Unable to get {} for {} : {}".format(relation_type, asset['id'], e))
    return_relations = []
    name_to_id_map = {}
    # Fix: this early exit used to return the bare list while the normal exit
    # returns the {'items', 'existing_name_to_id_map'} dict; callers indexing
    # those keys would crash on this path.
    if 'results' not in relations:
        return {'items': return_relations, 'existing_name_to_id_map': name_to_id_map}
    for relation in relations['results']:
        # if this relation is controlled by an inventory source we can skip it
        if 'has_inventory_sources' in relation and relation['has_inventory_sources']:
            continue
        name_to_id_map[relation['name']] = relation['id']
        new_relation = {}
        map_node_to_post_options(post_options, relation, new_relation)
        if relation_type == 'inventory_source':
            # If this is an inventory source we also need to resolve the source_project
            if 'source_project' in relation and relation['source_project']:
                try:
                    project = tower_cli.get_resource('project').get(relation['source_project'])
                except TowerCLIError as e:
                    raise TowerCLIError("Unable to get project {} for {} : {}".format(
                        relation['source_project'], relation_type, e
                    ))
                new_relation['source_project'] = project['name']
            if 'source_script' in relation and relation['source_script']:
                try:
                    script = tower_cli.get_resource('inventory_script').get(relation['source_script'])
                except TowerCLIError as e:
                    raise TowerCLIError("Unable to get inventory script {} for {} : {}".format(
                        relation['source_script'], relation_type, e
                    ))
                new_relation['source_script'] = script['name']
            if 'credential' in relation and relation['credential']:
                try:
                    credential = tower_cli.get_resource('credential').get(relation['credential'])
                except TowerCLIError as e:
                    raise TowerCLIError("Unable to get inventory credential {} for {} : {}".format(
                        relation['credential'], relation_type, e
                    ))
                new_relation['credential'] = credential['name']
            # Now get the schedules for this source
            if 'related' in relation and 'schedules' in relation['related']:
                schedule_data = extract_schedules(relation)
                new_relation['schedules'] = schedule_data['items']
        # The relation is re-attached to its inventory on import.
        del new_relation['inventory']
        return_relations.append(new_relation)
    return {'items': return_relations, 'existing_name_to_id_map': name_to_id_map}
def extract_inventory_groups(asset):
    """Export the non-source-managed root group tree of an inventory.

    Returns {'items': [...], 'existing_name_to_id_map': {...}} where items are
    exportable group structures (from process_inventory_groups) and the map
    records Tower ids per group name, nested under 'sub_groups' per level.
    """
    return_asset = []
    name_to_id_map = {}
    if 'related' not in asset or 'root_groups' not in asset['related']:
        debug.log("Asset {} does not have root_groups to process".format(asset['name']))
        # Fix: this path used to return the bare list while the normal exit
        # returns the {'items', 'existing_name_to_id_map'} dict; callers
        # indexing those keys would crash here.
        return {'items': return_asset, 'existing_name_to_id_map': name_to_id_map}
    root_groups_response = load_all_assets(asset['related']['root_groups'])
    for root_group in root_groups_response['results']:
        # Groups managed by an inventory source are rebuilt on sync; skip them.
        if 'has_inventory_sources' in root_group and root_group['has_inventory_sources']:
            continue
        name_to_id_map[root_group['name']] = {
            'id': root_group['id'],
            'sub_groups': {}
        }
        process_inv_group_data = process_inventory_groups(root_group)
        return_asset.append(process_inv_group_data['items'])
        name_to_id_map[root_group['name']]['sub_groups'] = process_inv_group_data['name_to_id_map']
    return {'items': return_asset, 'existing_name_to_id_map': name_to_id_map}
def process_inventory_groups(group_json):
    """Recursively convert one inventory group into an exportable structure.

    Returns {'items': <group dict with 'sub_groups'/'hosts'>,
    'name_to_id_map': {child_name: {'id': ..., 'sub_groups': {...}}}}.
    """
    group_post_options = get_api_options('group')
    group_to_return = {}
    map_node_to_post_options(group_post_options, group_json, group_to_return)
    name_to_id_map = {}
    # Now we need to get the children for the group (which should all be groups)
    if 'related' in group_json and 'children' in group_json['related']:
        group_to_return['sub_groups'] = []
        children = load_all_assets(group_json['related']['children'])
        for child in children['results']:
            if 'type' not in child:
                debug.log("Found a child without a type in group {} : {}".format(group_json['name'], child))
                continue
            if child['type'] == 'group':
                # Recurse into sub-groups, mirroring the nesting in the maps.
                process_inv_data = process_inventory_groups(child)
                group_to_return['sub_groups'].append(process_inv_data['items'])
                name_to_id_map[child['name']] = {
                    'id': child['id'],
                    'sub_groups': process_inv_data['name_to_id_map']
                }
            else:
                debug.log("Found unexpected child type of {} when processing group {}".format(
                    child['type'], group_json['name']
                ))
    # And also get the hosts in this group
    if 'related' in group_json and 'hosts' in group_json['related']:
        group_to_return['hosts'] = []
        hosts = load_all_assets(group_json['related']['hosts'])
        for host in hosts['results']:
            if 'name' not in host:
                debug.log("Found a host without a name in group {} : {}".format(group_json['name'], host))
                continue
            group_to_return['hosts'].append(host['name'])
    # we can remove the inventory option because we are appending this group directory to an inventory object
    if 'inventory' in group_to_return:
        del group_to_return['inventory']
    return {'items': group_to_return, 'name_to_id_map': name_to_id_map}
def load_all_assets(url_to_load):
    """Follow API pagination from *url_to_load* and merge every page.

    Returns a dict with the accumulated 'count' and 'results' of all pages.
    """
    results = {
        'count': 0,
        'results': []
    }
    next_url = url_to_load
    while next_url:
        page = client.request('GET', next_url).json()
        # Accumulate this page into the combined result.
        results['count'] += page['count']
        results['results'] += page['results']
        # The API supplies a truthy 'next' link until the last page.
        next_url = page.get('next')
    return results
def extract_notifications(asset, notification_type):
    """Return the names of *notification_type* templates attached to *asset*.

    Yields [] when the asset has no such related endpoint or no results.
    """
    related = asset.get('related', {})
    if notification_type not in related:
        return []
    response = load_all_assets(related[notification_type])
    return [notification['name'] for notification in response.get('results', [])]
def remove_local_path_from_scm_project(asset):
    """Drop 'local_path' from an SCM-backed project export, in place.

    Manual projects (scm_type 'Manual' or '') keep their local_path; it is
    only meaningful for manually-managed project directories.
    """
    if 'scm_type' not in asset or 'local_path' not in asset:
        return
    if asset['scm_type'] in ('Manual', ''):
        return
    del asset['local_path']
def get_assets_from_input(all=False, asset_input=None):
    """Normalize the CLI asset selection into {type: {'all': bool, 'names': [...]}}.

    With all=True every type in SEND_ORDER is selected wholesale; otherwise
    *asset_input* maps asset types to name lists, where the special name
    'all' selects every asset of that type.

    Raises TowerCLIError when the resulting selection is empty.

    (The parameter name `all` shadows the builtin but is kept for
    backward compatibility with existing callers.)
    """
    return_assets = {}
    if all:
        # Fix: the original guarded membership and then re-set 'all' — both
        # redundant, since each type is visited exactly once here.
        for asset_type in SEND_ORDER:
            return_assets[asset_type] = {'all': True, 'names': []}
    else:
        # Fix: iterating None used to raise TypeError when called with the
        # defaults; treat it as "nothing selected" so the intended
        # TowerCLIError below is raised instead.
        for asset_type, names in (asset_input or {}).items():
            entry = {'all': False, 'names': []}
            for asset_name in names:
                if asset_name == 'all':
                    entry['all'] = True
                else:
                    entry['names'].append(asset_name)
            return_assets[asset_type] = entry
    if not return_assets:
        # Fix: message previously read "Nothing assets were specified".
        raise TowerCLIError("No assets were specified")
    return return_assets
def extract_credentials(asset):
    """List the names of credentials attached to *asset*.

    Returns {'items': [name, ...], 'existing_name_to_id_map': {name: id}}.
    """
    credential_names = []
    name_to_id_map = {}
    for credential in load_all_assets(asset['related']['credentials'])['results']:
        name_to_id_map[credential['name']] = credential['id']
        credential_names.append(credential['name'])
    return {'items': credential_names, 'existing_name_to_id_map': name_to_id_map}
def extract_labels(asset):
    """Export the labels attached to *asset*.

    Labels exist in an organization, so the lookup map is two-tiered:
    {'items': [reduced label, ...],
     'existing_name_to_object_map': {org_name: {label_name: original_label}}}.
    """
    reduced_labels = []
    labels_by_org = {}
    label_options = get_api_options('label')
    for label in load_all_assets(asset['related']['labels'])['results']:
        # Keep an untouched copy (org still a numeric id) for the lookup map,
        # because resolve_asset_dependencies rewrites ids to names in place.
        pristine_label = copy.deepcopy(label)
        resolve_asset_dependencies(label, 'label')
        reduced_label = {}
        map_node_to_post_options(label_options, label, reduced_label)
        reduced_labels.append(reduced_label)
        # Keys of the map: [org name][label name] -> original label object.
        labels_by_org.setdefault(label['organization'], {})[label['name']] = pristine_label
    return {'items': reduced_labels, 'existing_name_to_object_map': labels_by_org}
def extract_schedules(asset):
    """Export the schedules attached to *asset*.

    Returns {'items': [reduced schedule, ...],
    'existing_name_to_object_map': {schedule_name: original_schedule}}.
    """
    reduced_schedules = []
    schedules_by_name = {}
    schedule_options = get_api_options('schedules')
    for schedule in load_all_assets(asset['related']['schedules'])['results']:
        schedules_by_name[schedule['name']] = schedule
        reduced_schedule = {}
        map_node_to_post_options(schedule_options, schedule, reduced_schedule)
        # The schedule is re-attached to its template on import, so the
        # unified_job_template reference is not exported.
        del(reduced_schedule['unified_job_template'])
        reduced_schedules.append(reduced_schedule)
    return {'items': reduced_schedules, 'existing_name_to_object_map': schedules_by_name}
def extract_roles(existing_asset):
    """Export the object roles of *existing_asset* with their actor grants.

    Returns {'items': [...], 'existing_name_to_object_map': {...}} where each
    item is {'name': role_name, <actor>: [identity, ...]} for every actor in
    ACTOR_FIELDS, and the map records Tower ids for roles and actors.
    """
    return_roles = []
    name_to_object_map = {'role': {}}
    # Fix: both early exits below used to `return [], {}` (a tuple) while the
    # normal exit returns a dict — callers indexing 'items' would crash.
    # Get the roles from the object
    if 'related' not in existing_asset or 'object_roles' not in existing_asset['related']:
        return {'items': return_roles, 'existing_name_to_object_map': name_to_object_map}
    roles = load_all_assets(existing_asset['related']['object_roles'])
    if 'results' not in roles:
        return {'items': return_roles, 'existing_name_to_object_map': name_to_object_map}
    for role in roles['results']:
        exported_role = {
            'name': role['name'],
        }
        name_to_object_map['role'][role['name']] = role['id']
        for actor in ACTOR_FIELDS:
            plural_actor = "{}s".format(actor)
            exported_role[actor] = []
            # NOTE(review): this resets the per-actor map on every role, so it
            # ends up holding only the last role's actors — looks unintended;
            # confirm before changing (kept as-is to preserve behavior).
            name_to_object_map[actor] = {}
            if plural_actor in role['related']:
                role_items = load_all_assets(role['related'][plural_actor])
                if 'results' not in role_items:
                    continue
                for role_object in role_items['results']:
                    # Identity is the human-readable key (e.g. a username).
                    identity = role_object[get_identity(actor)]
                    exported_role[actor].append(identity)
                    name_to_object_map[actor][identity] = role_object['id']
        return_roles.append(exported_role)
    return {'items': return_roles, 'existing_name_to_object_map': name_to_object_map}
| 39.235067
| 118
| 0.640426
|
acff3e0ec477a412c078e51315bc3d11f65553d0
| 86,750
|
py
|
Python
|
lib/faker/providers/person/en/__init__.py
|
duyetdev/api.duyetdev.com
|
4c33cc2cfb43ad6c4089873230e7b657659bff15
|
[
"MIT"
] | 2
|
2017-06-12T11:08:01.000Z
|
2017-06-13T00:50:16.000Z
|
lib/faker/providers/person/en/__init__.py
|
duyetdev/api.duyetdev.com
|
4c33cc2cfb43ad6c4089873230e7b657659bff15
|
[
"MIT"
] | 12
|
2020-07-11T01:42:51.000Z
|
2020-08-12T17:17:35.000Z
|
lib/faker/providers/person/en/__init__.py
|
duyetdev/api.duyetdev.com
|
4c33cc2cfb43ad6c4089873230e7b657659bff15
|
[
"MIT"
] | 1
|
2019-11-23T08:32:07.000Z
|
2019-11-23T08:32:07.000Z
|
from __future__ import unicode_literals
from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats_female = (
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{prefix_female}} {{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}} {{suffix_female}}',
'{{prefix_female}} {{first_name_female}} {{last_name}} {{suffix_female}}'
)
formats_male = (
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{prefix_male}} {{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}} {{suffix_male}}',
'{{prefix_male}} {{first_name_male}} {{last_name}} {{suffix_male}}'
)
formats = formats_male + formats_female
first_names_female = ('Aaliyah', 'Abagail', 'Abbey', 'Abbie', 'Abbigail',
'Abby', 'Abigail','Abigale', 'Abigayle', 'Abril', 'Achsah', 'Ada',
'Adah', 'Adaline', 'Adalyn','Adalynn', 'Adamaris', 'Adda', 'Addie',
'Addison', 'Addisyn', 'Addyson', 'Adel','Adela', 'Adelaide', 'Adele',
'Adelia', 'Adelina', 'Adeline', 'Adell', 'Adella','Adelle', 'Adelyn',
'Adelynn', 'Adilene', 'Adina', 'Adison', 'Adline', 'Adria','Adriana',
'Adriane', 'Adrianna', 'Adrianne', 'Adriene', 'Adrienne', 'Adyson',
'Affie', 'Afton', 'Agatha', 'Aggie', 'Agnes', 'Agness', 'Agusta',
'Aida','Aileen', 'Ailene', 'Aili', 'Aimee', 'Ainsley', 'Aisha',
'Aiyana', 'Aiyanna','Aja', 'Akeelah', 'Akira', 'Ala', 'Alabama',
'Alaina', 'Alana', 'Alani', 'Alanna', 'Alannah', 'Alaya', 'Alayna',
'Alba', 'Alberta', 'Albertha', 'Albertina', 'Albertine', 'Albina',
'Alcie', 'Alda', 'Aldona', 'Aleah', 'Alease', 'Alecia', 'Aleen',
'Aleena', 'Alejandra', 'Alena', 'Alene', 'Alesha', 'Alesia',
'Alessandra', 'Aleta', 'Aletha', 'Alethea', 'Alex', 'Alexa',
'Alexandr', 'Alexandra', 'Alexandrea', 'Alexandria', 'Alexia',
'Alexina', 'Alexis', 'Alexus', 'Alexys', 'Alfreda', 'Alia', 'Aliana',
'Alice', 'Alicia', 'Alida', 'Alina', 'Aline', 'Alisa', 'Alisha',
'Alison', 'Alissa', 'Alisson', 'Alivia', 'Aliya', 'Aliyah', 'Aliza',
'Alize', 'Alla', 'Allean', 'Alleen', 'Allena', 'Allene', 'Allie',
'Alline', 'Allison', 'Allisson', 'Ally', 'Allyson', 'Allyssa', 'Alma',
'Almeda', 'Almedia', 'Almeta', 'Almina', 'Almira', 'Almyra', 'Aloma',
'Alondra', 'Alpha', 'Alphonsine', 'Alta', 'Altha', 'Althea', 'Altie',
'Alvena', 'Alvera', 'Alverda', 'Alverta', 'Alvina', 'Alvira',
'Alwilda', 'Alwina', 'Alwine', 'Alyce', 'Alycia', 'Alys', 'Alysa',
'Alyse', 'Alysha', 'Alysia', 'Alyson', 'Alyssa', 'Alyssia', 'Alyvia',
'Alzina', 'Ama', 'Amalia', 'Amalie', 'Amanda', 'Amani', 'Amara',
'Amari', 'Amaris', 'Amaya', 'Amber', 'Amberly', 'Amelia', 'Amelie',
'America', 'Amey', 'Ami', 'Amiah', 'Amie', 'Amina', 'Amira', 'Amirah',
'Amiya', 'Amiyah', 'Amma', 'Ammie', 'Amparo', 'Amy', 'Amya', 'Ana',
'Anabel', 'Anabella', 'Anabelle', 'Anahi', 'Anais', 'Analia',
'Anastacia', 'Anastasia', 'Anaya', 'Andra', 'Andrea', 'Andria',
'Angel', 'Angela', 'Angele', 'Angeles', 'Angelia', 'Angelic',
'Angelica', 'Angelina', 'Angeline', 'Angelique', 'Angelita', 'Angella',
'Angie', 'Anice', 'Anie', 'Anika', 'Anissa', 'Anita', 'Anitra',
'Aniya', 'Aniyah', 'Anjali', 'Anjanette', 'Anjelica', 'Ann', 'Anna',
'Annabel', 'Annabell', 'Annabella', 'Annabelle', 'Annalise', 'Annamae',
'Annamarie', 'Anne', 'Anneliese', 'Annemarie', 'Anner', 'Annetta',
'Annette', 'Annice', 'Annie', 'Annika', 'Annis', 'Annmarie', 'Anona',
'Ansley', 'Antionette', 'Antoinette', 'Antonetta', 'Antonette',
'Antonia', 'Antonina', 'Anya', 'April', 'Ara', 'Arabella', 'Araceli',
'Aracely', 'Arah', 'Araminta', 'Ardath', 'Ardelia', 'Ardell',
'Ardella', 'Ardelle', 'Arden', 'Ardeth', 'Ardis', 'Ardith', 'Ardyce',
'Areli', 'Arely', 'Aretha', 'Argie', 'Aria', 'Ariana', 'Ariane',
'Arianna', 'Arie', 'Ariel', 'Ariella', 'Arielle', 'Arietta', 'Arizona',
'Arkie', 'Arla', 'Arleen', 'Arlena', 'Arlene', 'Arleth', 'Arletta',
'Arley', 'Arlie', 'Arline', 'Arly', 'Arlyne', 'Armani', 'Armida',
'Arminda', 'Arminta', 'Arnetta', 'Arra', 'Arrie', 'Arta', 'Artelia',
'Arvilla', 'Aryana', 'Aryanna', 'Asha', 'Ashanti', 'Ashely', 'Ashlea',
'Ashlee', 'Ashleigh', 'Ashley', 'Ashli', 'Ashlie', 'Ashly', 'Ashlyn',
'Ashlynn', 'Ashtyn', 'Asia', 'Ason', 'Aspen', 'Assunta', 'Astrid',
'Atha', 'Athena', 'Attie', 'Aubree', 'Aubrey', 'Aubrie', 'Audie',
'Audra', 'Audrey', 'Audriana', 'Audrianna', 'Audrina', 'Audry',
'Augusta', 'Augustina', 'Aura', 'Aurelia', 'Aurilla', 'Aurora',
'Aurore', 'Autumn', 'Ava', 'Avah', 'Averi', 'Averie', 'Avie', 'Avis',
'Ayana', 'Ayanna', 'Ayesha', 'Ayla', 'Ayleen', 'Aylin', 'Azalee',
'Azaria', 'Azariah', 'Azul', 'Azzie', 'Babette', 'Baby', 'Bailee',
'Bailey', 'Bama', 'Bambi', 'Barb', 'Barbara', 'Barbie', 'Barbra',
'Baylee', 'Baylie', 'Bea', 'Beadie', 'Beatrice', 'Beatrix', 'Beatriz',
'Beaulah', 'Bebe', 'Beckie', 'Becky', 'Beda', 'Bee', 'Belen', 'Belia',
'Belinda', 'Bell', 'Bella', 'Belle', 'Belva', 'Bena', 'Benita',
'Bennie', 'Berdie', 'Berenice', 'Bernadette', 'Bernadine',
'Bernardine', 'Berneice', 'Bernetta', 'Bernice', 'Berniece', 'Bernita',
'Berta', 'Bertha', 'Bertie', 'Bertina', 'Beryl', 'Bess', 'Besse',
'Bessie', 'Beth', 'Betha', 'Bethann', 'Bethany', 'Bethel', 'Bethzy',
'Betsey', 'Betsy', 'Bette', 'Bettie', 'Bettina', 'Betty', 'Bettye',
'Bettyjane', 'Bettylou', 'Beula', 'Beulah', 'Bev', 'Beverlee',
'Beverley', 'Beverly', 'Beyonce', 'Bianca', 'Biddie', 'Billie',
'Billy', 'Billye', 'Bina', 'Bird', 'Birdella', 'Birdie', 'Birtha',
'Birtie', 'Blair', 'Blake', 'Blanca', 'Blanch', 'Blanche', 'Blanchie',
'Blossom', 'Bobbi', 'Bobbie', 'Bobby', 'Bobbye', 'Bonita', 'Bonnie',
'Bonny', 'Braelyn', 'Brande', 'Brandee', 'Brandi', 'Brandie',
'Brandon', 'Brandy', 'Brea', 'Breana', 'Breann', 'Breanna', 'Breanne',
'Bree', 'Brenda', 'Brenna', 'Breonna', 'Brett', 'Bria', 'Briana',
'Brianda', 'Brianna', 'Brianne', 'Bridget', 'Bridgett', 'Bridgette',
'Brielle', 'Brigette', 'Brigid', 'Brigitte', 'Briley', 'Brinda',
'Brinley', 'Brionna', 'Brisa', 'Bristol', 'Britany', 'Britney',
'Britni', 'Britny', 'Britt', 'Britta', 'Brittaney', 'Brittani',
'Brittanie', 'Brittany', 'Brittnay', 'Brittnee', 'Brittney', 'Brittni',
'Brittnie', 'Brittny', 'Brook', 'Brooke', 'Brooklyn', 'Brooklynn',
'Bryana', 'Bryanna', 'Brylee', 'Bryn', 'Brynlee', 'Brynn', 'Buelah',
'Buena', 'Buffy', 'Bula', 'Bulah', 'Buna', 'Burnice', 'Byrd', 'Byrdie',
'Caddie', 'Cadence', 'Cailyn', 'Caitlin', 'Caitlyn', 'Caitlynn',
'Caldonia', 'Caleigh', 'Cali', 'Calista', 'Calla', 'Calleigh',
'Callie', 'Cambria', 'Cameron', 'Cami', 'Camila', 'Camilla', 'Camille',
'Camisha', 'Cammie', 'Campbell', 'Camryn', 'Candace', 'Candi',
'Candice', 'Candida', 'Candis', 'Candy', 'Candyce', 'Cannie',
'Capitola', 'Cappie', 'Caprice', 'Cara', 'Caren', 'Carey', 'Cari',
'Carie', 'Carin', 'Carina', 'Carisa', 'Carissa', 'Carla', 'Carlee',
'Carleen', 'Carleigh', 'Carlene', 'Carley', 'Carli', 'Carlie',
'Carlota', 'Carlotta', 'Carly', 'Carlyn', 'Carma', 'Carmel', 'Carmela',
'Carmelita', 'Carmella', 'Carmen', 'Caro', 'Carol', 'Carolann',
'Carole', 'Carolee', 'Carolina', 'Caroline', 'Carolyn', 'Carolyne',
'Carolynn', 'Caron', 'Carra', 'Carri', 'Carrie', 'Carrol', 'Carroll',
'Carry', 'Carson', 'Cary', 'Caryl', 'Caryn', 'Casandra', 'Casey',
'Casie', 'Cassandra', 'Cassidy', 'Cassie', 'Cassondra', 'Catalina',
'Catharine', 'Catherine', 'Cathern', 'Cathey', 'Cathi', 'Cathie',
'Cathleen', 'Cathrine', 'Cathryn', 'Cathy', 'Catina', 'Catrina',
'Caydence', 'Cayla', 'Caylee', 'Cecelia', 'Cecile', 'Cecilia',
'Cecily', 'Ceil', 'Celena', 'Celesta', 'Celeste', 'Celestia',
'Celestine', 'Celia', 'Celie', 'Celina', 'Celine', 'Cena', 'Ceola',
'Chaka', 'Chana', 'Chanda', 'Chandler', 'Chandra', 'Chanel',
'Chanelle', 'Chaney', 'Chanie', 'Channie', 'Channing', 'Chantal',
'Chante', 'Chantel', 'Chantelle', 'Charissa', 'Charisse', 'Charity',
'Charla', 'Charlee', 'Charleen', 'Charlene', 'Charley', 'Charlie',
'Charline', 'Charlize', 'Charlotta', 'Charlotte', 'Charlottie',
'Charlsie', 'Charmaine', 'Charolette', 'Chase', 'Chasity', 'Chastity',
'Chaya', 'Chelsea', 'Chelsey', 'Chelsi', 'Chelsie', 'Chelsy', 'Cher',
'Cherelle', 'Cheri', 'Cherie', 'Cherilyn', 'Cherise', 'Cherish',
'Cherrelle', 'Cherri', 'Cherrie', 'Cherry', 'Cherryl', 'Cheryl',
'Cheryle', 'Cheryll', 'Chessie', 'Chestina', 'Cheyanne', 'Cheyenne',
'Chimere', 'China', 'Chiquita', 'Chloe', 'Chloie', 'Chris', 'Chrissie',
'Chrissy', 'Christa', 'Christal', 'Christeen', 'Christel', 'Christen',
'Christena', 'Christene', 'Christi', 'Christian', 'Christiana',
'Christie', 'Christin', 'Christina', 'Christine', 'Christy',
'Chrystal', 'Chyna', 'Chynna', 'Ciara', 'Ciarra', 'Cicely', 'Cielo',
'Ciera', 'Cierra', 'Ciji', 'Cilla', 'Cinda', 'Cindi', 'Cindy',
'Cinnamon', 'Cinthia', 'Citlali', 'Citlalli', 'Clair', 'Claire',
'Clara', 'Clarabelle', 'Clare', 'Claribel', 'Clarice', 'Clarinda',
'Clarine', 'Clarisa', 'Clarissa', 'Classie', 'Claudette', 'Claudia',
'Claudie', 'Claudine', 'Cleda', 'Clella', 'Clem', 'Clemence',
'Clementina', 'Clementine', 'Clemie', 'Clemma', 'Clemmie', 'Cleo',
'Cleola', 'Cleone', 'Cleora', 'Cleta', 'Cleva', 'Clevie', 'Cliffie',
'Cloe', 'Clora', 'Clotilda', 'Clotilde', 'Clyda', 'Clydie', 'Clytie',
'Coleen', 'Coletta', 'Colette', 'Colleen', 'Collette', 'Columbia',
'Concepcion', 'Concetta', 'Concha', 'Connie', 'Constance', 'Consuela',
'Consuelo', 'Contina', 'Cora', 'Coraima', 'Coral', 'Coralie', 'Corda',
'Cordelia', 'Cordella', 'Cordia', 'Cordie', 'Corean', 'Corene',
'Coretta', 'Corey', 'Cori', 'Corie', 'Corina', 'Corine', 'Corinna',
'Corinne', 'Corliss', 'Cornelia', 'Cornie', 'Corrie', 'Corrina',
'Corrine', 'Cortney', 'Cory', 'Courtney', 'Creola', 'Cressie', 'Crete',
'Crissie', 'Crissy', 'Crista', 'Cristal', 'Cristen', 'Cristi',
'Cristin', 'Cristina', 'Cristine', 'Cristy', 'Cruz', 'Crysta',
'Crystal', 'Cuba', 'Cydney', 'Cyndi', 'Cyntha', 'Cynthia', 'Dafne',
'Dagmar', 'Dagny', 'Dahlia', 'Daija', 'Daijah', 'Daisey', 'Daisha',
'Daisie', 'Daisy', 'Daisye', 'Daja', 'Dakota', 'Dale', 'Dalia',
'Dallas', 'Damaris', 'Dana', 'Danae', 'Daneen', 'Danelle', 'Danette',
'Dani', 'Dania', 'Danica', 'Daniela', 'Daniele', 'Daniella',
'Danielle', 'Danika', 'Danita', 'Danna', 'Dannie', 'Dannielle',
'Danyel', 'Danyell', 'Danyelle', 'Daphne', 'Dara', 'Darby', 'Darci',
'Darcie', 'Darcy', 'Daria', 'Darian', 'Dariana', 'Darla', 'Darleen',
'Darlene', 'Darline', 'Darlyne', 'Dasia', 'Davina', 'Dawn', 'Dawna',
'Dawne', 'Dayami', 'Dayana', 'Dayanara', 'Dayle', 'Dayna', 'Dayse',
'Deana', 'Deandra', 'Deann', 'Deanna', 'Deanne', 'Deasia', 'Deb',
'Debbi', 'Debbie', 'Debbra', 'Debby', 'Debera', 'Debi', 'Debora',
'Deborah', 'Deborrah', 'Debra', 'Debrah', 'Debroah', 'Dedra', 'Dee',
'Deeann', 'Deedee', 'Deena', 'Deetta', 'Deidra', 'Deidre', 'Deirdre',
'Deja', 'Dejah', 'Delaney', 'Delcie', 'Delfina', 'Delia', 'Deliah',
'Delila', 'Delilah', 'Delina', 'Delinda', 'Delisa', 'Dell', 'Della',
'Dellar', 'Delle', 'Dellia', 'Dellie', 'Delma', 'Delois', 'Delora',
'Delores', 'Deloris', 'Delpha', 'Delphia', 'Delphine', 'Delsie',
'Delta', 'Dema', 'Demetra', 'Demetria', 'Demi', 'Dena', 'Deneen',
'Denese', 'Denice', 'Denine', 'Denise', 'Denisha', 'Denisse', 'Denita',
'Dennie', 'Desirae', 'Desiree', 'Dessa', 'Dessie', 'Destany',
'Destinee', 'Destiney', 'Destini', 'Destiny', 'Devan', 'Devin',
'Devon', 'Devyn', 'Dewey', 'Deyanira', 'Dezzie', 'Diamond', 'Dian',
'Diana', 'Diandra', 'Diane', 'Diann', 'Dianna', 'Dianne', 'Dicie',
'Dicy', 'Dillie', 'Dimple', 'Dina', 'Dinah', 'Dione', 'Dionne',
'Dixie', 'Diya', 'Djuana', 'Djuna', 'Docia', 'Dola', 'Dollie', 'Dolly',
'Dollye', 'Dolores', 'Doloris', 'Domenica', 'Dominga', 'Dominique',
'Dominque', 'Domonique', 'Dona', 'Donia', 'Donie', 'Donita', 'Donna',
'Donnie', 'Dora', 'Dorathea', 'Dorathy', 'Dorcas', 'Doreen', 'Dorene',
'Doretha', 'Doretta', 'Dori', 'Dorinda', 'Dorine', 'Doris', 'Dorla',
'Dorotha', 'Dorothea', 'Dorothy', 'Dorris', 'Dortha', 'Dorthea',
'Dorthey', 'Dorthy', 'Dosha', 'Doshia', 'Doshie', 'Dosia', 'Dossie',
'Dot', 'Dottie', 'Dotty', 'Dove', 'Dovie', 'Drema', 'Drew', 'Drucilla',
'Drusilla', 'Dulce', 'Dulcie', 'Dusty', 'Dwan', 'Dyan', 'Dylan',
'Earlean', 'Earlene', 'Earlie', 'Earline', 'Earnestine', 'Eartha',
'Easter', 'Eathel', 'Ebba', 'Eboni', 'Ebony', 'Echo', 'Eda', 'Eddie',
'Eden', 'Edie', 'Edith', 'Edla', 'Edmonia', 'Edna', 'Ednah', 'Edra',
'Edrie', 'Edris', 'Edwina', 'Edyth', 'Edythe', 'Effa', 'Effie',
'Eileen', 'Eithel', 'Ela', 'Elaina', 'Elaine', 'Elana', 'Elayne',
'Elba', 'Elberta', 'Elda', 'Eldora', 'Eleanor', 'Eleanora', 'Eleanore',
'Elease', 'Electa', 'Elena', 'Elenor', 'Elenora', 'Elenore',
'Eleonora', 'Eleonore', 'Elfie', 'Elfreda', 'Elfrieda', 'Elgie',
'Elia', 'Eliana', 'Elianna', 'Elida', 'Elinor', 'Elinore', 'Elisa',
'Elisabeth', 'Elise', 'Elisha', 'Elissa', 'Eliza', 'Elizabet',
'Elizabeth', 'Elizbeth', 'Elizebeth', 'Ella', 'Ellamae', 'Ellar',
'Elle', 'Ellen', 'Eller', 'Elliana', 'Ellie', 'Ellyn', 'Elma',
'Elmina', 'Elmira', 'Elmire', 'Elmyra', 'Elna', 'Elnora', 'Elodie',
'Elois', 'Eloisa', 'Eloise', 'Elouise', 'Elsa', 'Else', 'Elsie',
'Elta', 'Elva', 'Elvera', 'Elvia', 'Elvie', 'Elvina', 'Elvira',
'Elwanda', 'Elyse', 'Elyssa', 'Elza', 'Elzada', 'Ema', 'Emaline',
'Ember', 'Emelia', 'Emelie', 'Emeline', 'Emely', 'Emerald', 'Emerson',
'Emery', 'Emilee', 'Emilia', 'Emilie', 'Emily', 'Emma', 'Emmalee',
'Emmaline', 'Emmer', 'Emmie', 'Emmy', 'Emogene', 'Ena', 'Enid',
'Enola', 'Enriqueta', 'Eola', 'Eppie', 'Epsie', 'Era', 'Erica',
'Ericka', 'Erie', 'Erika', 'Erin', 'Eris', 'Erla', 'Erlene', 'Erlinda',
'Erline', 'Erma', 'Ermina', 'Ermine', 'Erna', 'Ernestina', 'Ernestine',
'Erykah', 'Eryn', 'Esmeralda', 'Esperanza', 'Essa', 'Essence', 'Essie',
'Esta', 'Estefani', 'Estefania', 'Estefany', 'Estela', 'Estell',
'Estella', 'Estelle', 'Ester', 'Esther', 'Estie', 'Estrella', 'Etha',
'Ethel', 'Ethelene', 'Ethelyn', 'Ether', 'Ethie', 'Ethyl', 'Ethyle',
'Etna', 'Etta', 'Etter', 'Ettie', 'Eudora', 'Eugenia', 'Eugenie',
'Eula', 'Eulah', 'Eulalia', 'Eulalie', 'Euna', 'Eunice', 'Euphemia',
'Eura', 'Eva', 'Evalena', 'Evaline', 'Evalyn', 'Evangelina',
'Evangeline', 'Eve', 'Evelena', 'Evelin', 'Evelina', 'Eveline',
'Evelyn', 'Evelyne', 'Evelynn', 'Ever', 'Evette', 'Evia', 'Evie',
'Evita', 'Evon', 'Evonne', 'Exa', 'Exie', 'Fabiola', 'Fae', 'Fairy',
'Faith', 'Fallon', 'Falon', 'Fannie', 'Fanny', 'Fannye', 'Farah',
'Farrah', 'Fatima', 'Fawn', 'Fay', 'Faye', 'Felecia', 'Felice',
'Felicia', 'Felicie', 'Felicitas', 'Felicity', 'Felipa', 'Felisha',
'Fern', 'Fernanda', 'Ferne', 'Fidelia', 'Filomena', 'Finley', 'Fiona',
'Flavia', 'Fleda', 'Fleeta', 'Fleta', 'Flo', 'Flonnie', 'Flor',
'Flora', 'Florance', 'Florence', 'Florene', 'Floretta', 'Florida',
'Florie', 'Florine', 'Florrie', 'Flossie', 'Floy', 'Fonda', 'Forest',
'Fran', 'Franc', 'Frances', 'Francesca', 'Francies', 'Francina',
'Francine', 'Francis', 'Francisca', 'Francisquita', 'Frankie', 'Freda',
'Freddie', 'Frederica', 'Fredericka', 'Freeda', 'Freida', 'Frida',
'Frieda', 'Frona', 'Fronia', 'Fronie', 'Fronnie', 'Fumiko', 'Gabriela',
'Gabriella', 'Gabrielle', 'Gail', 'Gale', 'Galilea', 'Garnet',
'Garnett', 'Gay', 'Gaye', 'Gayla', 'Gayle', 'Gaylene', 'Gaynell',
'Gearldine', 'Gemma', 'Gena', 'Gene', 'Genesis', 'Geneva', 'Genevieve',
'Genevra', 'Genie', 'Gennie', 'Genoveva', 'Georganna', 'Georgeann',
'Georgeanna', 'Georgene', 'Georgetta', 'Georgette', 'Georgia',
'Georgiana', 'Georgiann', 'Georgianna', 'Georgie', 'Georgina',
'Georgine', 'Geraldine', 'Geralyn', 'Gerda', 'Geri', 'Germaine',
'Gerri', 'Gerry', 'Gertha', 'Gertie', 'Gertrude', 'Gia', 'Giada',
'Giana', 'Gianna', 'Gidget', 'Gigi', 'Gilda', 'Gillian', 'Gillie',
'Gina', 'Ginger', 'Ginny', 'Giovanna', 'Girtha', 'Gisele', 'Giselle',
'Gisselle', 'Giuliana', 'Gladis', 'Gladyce', 'Gladys', 'Glenda',
'Glendora', 'Glenn', 'Glenna', 'Glennie', 'Glennis', 'Glinda',
'Gloria', 'Glynda', 'Glynis', 'Golda', 'Golden', 'Goldia', 'Goldie',
'Grace', 'Gracelyn', 'Gracia', 'Gracie', 'Graciela', 'Grayce',
'Grecia', 'Gregoria', 'Greta', 'Gretchen', 'Gretta', 'Grisel',
'Griselda', 'Guadalupe', 'Gunda', 'Gussie', 'Gusta', 'Gustie', 'Gwen',
'Gwenda', 'Gwendolyn', 'Gwyn', 'Gwyneth', 'Hadassah', 'Hadley',
'Hailee', 'Hailey', 'Hailie', 'Haleigh', 'Haley', 'Hali', 'Halie',
'Halle', 'Halley', 'Hallie', 'Hana', 'Hanna', 'Hannah', 'Harlene',
'Harley', 'Harlow', 'Harmony', 'Harper', 'Harriet', 'Harriett',
'Harriette', 'Haruko', 'Hasel', 'Hassie', 'Hattie', 'Haven', 'Hayden',
'Haylee', 'Hayleigh', 'Hayley', 'Haylie', 'Hazel', 'Hazelle', 'Hazle',
'Heather', 'Heaven', 'Hedwig', 'Hedy', 'Heidi', 'Heidy', 'Helaine',
'Helen', 'Helena', 'Helene', 'Helga', 'Hellen', 'Helma', 'Helyn',
'Hennie', 'Henretta', 'Henrietta', 'Henriette', 'Herlinda', 'Herma',
'Hermina', 'Hermine', 'Herminia', 'Hertha', 'Hessie', 'Hester',
'Hettie', 'Hetty', 'Hilah', 'Hilary', 'Hilda', 'Hildegard',
'Hildegarde', 'Hildred', 'Hildur', 'Hillary', 'Hilma', 'Holli',
'Hollie', 'Hollis', 'Holly', 'Honora', 'Hope', 'Hortencia', 'Hortense',
'Hortensia', 'Hulda', 'Huldah', 'Hunter', 'Ica', 'Icey', 'Icie', 'Icy',
'Ida', 'Idabelle', 'Idamae', 'Idell', 'Idella', 'Iesha', 'Ieshia',
'Ila', 'Ilah', 'Ilda', 'Ilene', 'Iliana', 'Illa', 'Ilma', 'Ilo',
'Ilona', 'Ima', 'Imani', 'Imelda', 'Imo', 'Imogene', 'Ina', 'India',
'Indiana', 'Inell', 'Ines', 'Inez', 'Infant', 'Inga', 'Ingeborg',
'Inger', 'Ingrid', 'Iola', 'Iona', 'Ione', 'Ira', 'Ireland', 'Irena',
'Irene', 'Iridian', 'Irine', 'Iris', 'Irma', 'Irva', 'Isa', 'Isabel',
'Isabela', 'Isabell', 'Isabella', 'Isabelle', 'Isadora', 'Isamar',
'Isis', 'Isla', 'Isobel', 'Itzel', 'Iva', 'Ivah', 'Ivana', 'Ivanna',
'Ivette', 'Ivey', 'Ivie', 'Ivonne', 'Ivory', 'Ivy', 'Iyana', 'Iyanna',
'Iza', 'Izabella', 'Izabelle', 'Izetta', 'Izola', 'Izora', 'Jacalyn',
'Jacey', 'Jackeline', 'Jacki', 'Jackie', 'Jacklyn', 'Jaclyn', 'Jacque',
'Jacquelin', 'Jacqueline', 'Jacquelyn', 'Jacquline', 'Jacqulyn',
'Jada', 'Jade', 'Jaden', 'Jadyn', 'Jaeda', 'Jaelyn', 'Jaelynn',
'Jaida', 'Jaiden', 'Jaidyn', 'Jailene', 'Jailyn', 'Jaime', 'Jaimee',
'Jakayla', 'Jaleesa', 'Jalisa', 'Jalissa', 'Jaliyah', 'Jalyn',
'Jalynn', 'Jamey', 'Jami', 'Jamie', 'Jamila', 'Jamiya', 'Jammie',
'Jamya', 'Jan', 'Jana', 'Janae', 'Janay', 'Jane', 'Janeen', 'Janel',
'Janell', 'Janelle', 'Janene', 'Janessa', 'Janet', 'Janette', 'Janey',
'Janiah', 'Janice', 'Janie', 'Janine', 'Janis', 'Janiya', 'Janiyah',
'Jann', 'Janna', 'Jannette', 'Jannie', 'January', 'Janyce', 'Jaquelin',
'Jaqueline', 'Jaslene', 'Jaslyn', 'Jasmin', 'Jasmine', 'Jasmyn',
'Jasmyne', 'Jaunita', 'Jaycee', 'Jaycie', 'Jayda', 'Jayde', 'Jayden',
'Jaye', 'Jayla', 'Jaylah', 'Jaylee', 'Jayleen', 'Jaylen', 'Jaylene',
'Jaylin', 'Jaylyn', 'Jaylynn', 'Jayme', 'Jayne', 'Jazlene', 'Jazlyn',
'Jazlynn', 'Jazmin', 'Jazmine', 'Jazmyn', 'Jazmyne', 'Jean', 'Jeana',
'Jeane', 'Jeanetta', 'Jeanette', 'Jeanie', 'Jeanine', 'Jeanmarie',
'Jeanna', 'Jeanne', 'Jeannette', 'Jeannie', 'Jeannine', 'Jeffie',
'Jemima', 'Jena', 'Jenelle', 'Jenifer', 'Jenilee', 'Jenna', 'Jennette',
'Jenni', 'Jennie', 'Jennifer', 'Jenniffer', 'Jenny', 'Jensen',
'Jeraldine', 'Jeri', 'Jerica', 'Jerilyn', 'Jerilynn', 'Jerri',
'Jerrica', 'Jerrie', 'Jerrilyn', 'Jerusha', 'Jeryl', 'Jesenia',
'Jesica', 'Jesse', 'Jessenia', 'Jessi', 'Jessica', 'Jessie', 'Jessika',
'Jessye', 'Jetta', 'Jettie', 'Jewel', 'Jewell', 'Jill', 'Jillian',
'Jimena', 'Jinnie', 'Jo', 'Joan', 'Joana', 'Joanie', 'Joann', 'Joanna',
'Joanne', 'Jocelyn', 'Jocelyne', 'Jocelynn', 'Jodi', 'Jodie', 'Jody',
'Joell', 'Joella', 'Joelle', 'Joellen', 'Joetta', 'Joette', 'Johana',
'Johanna', 'Johannah', 'Johnie', 'Johnna', 'Johnnie', 'Joi', 'Joleen',
'Jolene', 'Jolette', 'Jolie', 'Joline', 'Jonell', 'Joni', 'Jonna',
'Jonnie', 'Jordan', 'Jordin', 'Jordyn', 'Joretta', 'Jorja', 'Josefa',
'Josefina', 'Josefita', 'Joselin', 'Joseline', 'Joselyn', 'Josephine',
'Josette', 'Josie', 'Josiephine', 'Joslyn', 'Jossie', 'Journey',
'Jovita', 'Joy', 'Joyce', 'Joycelyn', 'Joye', 'Juana', 'Juanita',
'Judi', 'Judie', 'Judith', 'Judy', 'Judyth', 'Jule', 'Juli', 'Julia',
'Juliana', 'Juliann', 'Julianna', 'Julianne', 'Julie', 'Juliet',
'Juliette', 'Julisa', 'Julissa', 'June', 'Junia', 'Junie', 'Justice',
'Justina', 'Justine', 'Kaaren', 'Kacey', 'Kaci', 'Kacie', 'Kacy',
'Kadence', 'Kadijah', 'Kaela', 'Kaelyn', 'Kaelynn', 'Kaia', 'Kaila',
'Kailee', 'Kailey', 'Kailyn', 'Kaitlin', 'Kaitlyn', 'Kaitlynn',
'Kaiya', 'Kala', 'Kaleena', 'Kaleigh', 'Kalene', 'Kaley', 'Kali',
'Kalie', 'Kaliyah', 'Kallie', 'Kalyn', 'Kamari', 'Kameron', 'Kami',
'Kamila', 'Kamilah', 'Kamora', 'Kamryn', 'Kamya', 'Kandace', 'Kandi',
'Kandice', 'Kandy', 'Kanesha', 'Kanisha', 'Kara', 'Karan', 'Karel',
'Karen', 'Kari', 'Karie', 'Karin', 'Karina', 'Karis', 'Karissa',
'Karla', 'Karlee', 'Karlene', 'Karley', 'Karli', 'Karlie', 'Karly',
'Karma', 'Karol', 'Karolyn', 'Karon', 'Karren', 'Karri', 'Karrie',
'Karsyn', 'Karyl', 'Karyme', 'Karyn', 'Kasandra', 'Kasey', 'Kasie',
'Kassandra', 'Kassidy', 'Kassie', 'Katarina', 'Kate', 'Katelin',
'Katelyn', 'Katelynn', 'Katerina', 'Kathaleen', 'Katharina',
'Katharine', 'Katharyn', 'Katherin', 'Katherine', 'Kathern',
'Katheryn', 'Kathey', 'Kathi', 'Kathie', 'Kathleen', 'Kathlene',
'Kathlyn', 'Kathrine', 'Kathryn', 'Kathryne', 'Kathy', 'Kathyrn',
'Kati', 'Katia', 'Katie', 'Katina', 'Katlin', 'Katlyn', 'Katlynn',
'Katrina', 'Kattie', 'Katy', 'Kay', 'Kaya', 'Kaycee', 'Kayden',
'Kaydence', 'Kaye', 'Kayla', 'Kaylah', 'Kaylan', 'Kaylee', 'Kayleen',
'Kayleigh', 'Kaylen', 'Kaylene', 'Kayley', 'Kayli', 'Kaylie', 'Kaylin',
'Kaylyn', 'Kaylynn', 'Kazuko', 'Keanna', 'Keara', 'Kecia', 'Keeley',
'Keely', 'Keena', 'Keesha', 'Keila', 'Keira', 'Keisha', 'Kelcie',
'Keli', 'Kelis', 'Kellee', 'Kelley', 'Kelli', 'Kellie', 'Kelly',
'Kelsea', 'Kelsey', 'Kelsi', 'Kelsie', 'Kendal', 'Kendall', 'Kendra',
'Kenia', 'Kenisha', 'Kenley', 'Kenna', 'Kennedi', 'Kennedy', 'Kenya',
'Kenyatta', 'Kenzie', 'Keri', 'Kerri', 'Kerrie', 'Kerry', 'Kesha',
'Keshia', 'Keyla', 'Khadijah', 'Khalilah', 'Khloe', 'Kia', 'Kiana',
'Kianna', 'Kiara', 'Kiarra', 'Kiera', 'Kierra', 'Kiersten', 'Kiley',
'Kim', 'Kimber', 'Kimberely', 'Kimberlee', 'Kimberley', 'Kimberli',
'Kimberlie', 'Kimberly', 'Kimora', 'Kindra', 'Kinley', 'Kinsey',
'Kinsley', 'Kira', 'Kirsten', 'Kirstie', 'Kirstin', 'Kisha', 'Kittie',
'Kitty', 'Kiya', 'Kiyoko', 'Kizzie', 'Kizzy', 'Kloe', 'Kori',
'Kortney', 'Kourtney', 'Kris', 'Krissy', 'Krista', 'Kristal',
'Kristan', 'Kristen', 'Kristi', 'Kristian', 'Kristie', 'Kristin',
'Kristina', 'Kristine', 'Kristy', 'Kristyn', 'Krysta', 'Krystal',
'Krysten', 'Krystin', 'Krystina', 'Krystle', 'Kya', 'Kyara', 'Kyla',
'Kylah', 'Kyle', 'Kylee', 'Kyleigh', 'Kylene', 'Kylie', 'Kyra',
'Kyrie', 'Lacey', 'Laci', 'Lacie', 'Lacy', 'Ladonna', 'Lady', 'Lahoma',
'Laila', 'Lailah', 'Lainey', 'Laisha', 'Lakeisha', 'Laken', 'Lakendra',
'Lakesha', 'Lakeshia', 'Lakisha', 'Lala', 'Lalla', 'Lana', 'Lanette',
'Laney', 'Lani', 'Lanie', 'Lanita', 'Lannie', 'Laquita', 'Lara',
'Larae', 'Laraine', 'Larissa', 'Larue', 'Lashanda', 'Lashawn',
'Lashonda', 'Lashunda', 'Lasonya', 'Lassie', 'Latanya', 'Latarsha',
'Latasha', 'Latesha', 'Latifah', 'Latisha', 'Latonia', 'Latonya',
'Latoria', 'Latosha', 'Latoya', 'Latoyia', 'Latrice', 'Latricia',
'Latrina', 'Launa', 'Laura', 'Laureen', 'Laurel', 'Lauren', 'Laurene',
'Lauretta', 'Laurette', 'Lauri', 'Laurie', 'Laurine', 'Lauryn',
'Lavada', 'Lavelle', 'Lavenia', 'Lavera', 'Lavern', 'Laverna',
'Laverne', 'Lavina', 'Lavinia', 'Lavon', 'Lavona', 'Lavonda',
'Lavonia', 'Lavonne', 'Lawanda', 'Layla', 'Laylah', 'Lea', 'Leafy',
'Leah', 'Leala', 'Leana', 'Leandra', 'Leaner', 'Leann', 'Leanna',
'Leanne', 'Leatha', 'Leatrice', 'Leda', 'Lee', 'Leeann', 'Leesa',
'Leia', 'Leigh', 'Leighton', 'Leila', 'Leilani', 'Leisa', 'Leisha',
'Leitha', 'Lela', 'Lelah', 'Lelar', 'Lelia', 'Lella', 'Lemma', 'Lempi',
'Lena', 'Lenna', 'Lennie', 'Lenora', 'Lenore', 'Leola', 'Leoma',
'Leona', 'Leone', 'Leonia', 'Leonie', 'Leonor', 'Leonora', 'Leonore',
'Leontine', 'Leora', 'Leota', 'Lera', 'Lesa', 'Lesia', 'Leslee',
'Lesley', 'Lesli', 'Leslie', 'Lesly', 'Lessie', 'Lesta', 'Leta',
'Letha', 'Lethia', 'Leticia', 'Letitia', 'Letta', 'Lettie', 'Letty',
'Leva', 'Levina', 'Lexi', 'Lexie', 'Lexis', 'Lexus', 'Leyla', 'Lia',
'Liana', 'Liane', 'Libbie', 'Libby', 'Liberty', 'Lida', 'Liddie',
'Lidia', 'Lidie', 'Lila', 'Lilah', 'Lilia', 'Lilian', 'Liliana',
'Lilianna', 'Lilie', 'Lilla', 'Liller', 'Lillia', 'Lillian',
'Lilliana', 'Lillianna', 'Lillie', 'Lillis', 'Lilly', 'Lily', 'Lilyan',
'Lilyana', 'Lilyanna', 'Lina', 'Linda', 'Lindsay', 'Lindsey', 'Lindy',
'Linette', 'Linna', 'Linnea', 'Linnie', 'Linsey', 'Lisa', 'Lisbeth',
'Lise', 'Lisette', 'Lisha', 'Lissa', 'Lissette', 'Lissie', 'Lita',
'Litha', 'Littie', 'Litzy', 'Livia', 'Liz', 'Liza', 'Lizabeth',
'Lizbeth', 'Lizeth', 'Lizette', 'Lizzie', 'Lockie', 'Loda', 'Logan',
'Lois', 'Lola', 'Lolita', 'Lolla', 'Lollie', 'Loma', 'Lona', 'London',
'Londyn', 'Loni', 'Lonie', 'Lonna', 'Lonnie', 'Lora', 'Loraine',
'Lorayne', 'Lorean', 'Loree', 'Loreen', 'Lorelai', 'Lorelei', 'Loren',
'Lorena', 'Lorene', 'Lorenza', 'Loretta', 'Loretto', 'Lori', 'Loria',
'Loriann', 'Lorie', 'Lorinda', 'Lorine', 'Loris', 'Lorna', 'Lorraine',
'Lorrayne', 'Lorri', 'Lorrie', 'Lossie', 'Lota', 'Lotta', 'Lottie',
'Lou', 'Louann', 'Louanna', 'Louella', 'Louetta', 'Louie', 'Louisa',
'Louise', 'Louisiana', 'Loula', 'Lourdes', 'Louvenia', 'Love', 'Lovey',
'Lovie', 'Lovina', 'Lovisa', 'Loyce', 'Lu', 'Luana', 'Luann', 'Luanne',
'Luberta', 'Lucero', 'Lucetta', 'Lucia', 'Luciana', 'Lucie', 'Lucile',
'Lucille', 'Lucina', 'Lucinda', 'Lucindy', 'Lucretia', 'Lucy', 'Luda',
'Ludie', 'Lue', 'Luella', 'Luetta', 'Lugenia', 'Luisa', 'Lula',
'Lulah', 'Lular', 'Lulie', 'Lulla', 'Lulu', 'Luna', 'Lupe', 'Lura',
'Lurana', 'Lurena', 'Lurline', 'Lutie', 'Luvenia', 'Luverne',
'Luvinia', 'Luz', 'Lyda', 'Lydia', 'Lyla', 'Lylah', 'Lyn', 'Lynda',
'Lyndia', 'Lyndsay', 'Lyndsey', 'Lynette', 'Lynn', 'Lynne', 'Lynnette',
'Lynsey', 'Lyric', 'Mabel', 'Mabell', 'Mabelle', 'Mable', 'Macel',
'Macey', 'Machelle', 'Maci', 'Macie', 'Mackenzie', 'Macy', 'Madaline',
'Madalyn', 'Madalynn', 'Maddison', 'Madeleine', 'Madelene', 'Madeline',
'Madelyn', 'Madelynn', 'Madge', 'Madie', 'Madilyn', 'Madilynn',
'Madisen', 'Madison', 'Madisyn', 'Madlyn', 'Madonna', 'Madora',
'Madyson', 'Mae', 'Maebell', 'Maebelle', 'Maegan', 'Maeve', 'Mafalda',
'Magan', 'Magdalen', 'Magdalena', 'Magdalene', 'Magen', 'Maggie',
'Magnolia', 'Mahala', 'Mahalia', 'Mahalie', 'Mai', 'Maia', 'Maida',
'Maira', 'Maiya', 'Makaila', 'Makala', 'Makayla', 'Makena', 'Makenna',
'Makenzie', 'Malaya', 'Maleah', 'Malia', 'Maliah', 'Malinda',
'Malissa', 'Malissie', 'Maliyah', 'Mallie', 'Mallorie', 'Mallory',
'Malorie', 'Malvina', 'Mame', 'Mamie', 'Mammie', 'Manda', 'Mandi',
'Mandie', 'Mandy', 'Manerva', 'Manervia', 'Manie', 'Manila', 'Manilla',
'Mannie', 'Manuela', 'Manuelita', 'Mara', 'Maralyn', 'Maranda',
'Marcela', 'Marcelina', 'Marceline', 'Marcella', 'Marcelle', 'Marci',
'Marcia', 'Marcie', 'Marcy', 'Mardell', 'Mareli', 'Marely', 'Maren',
'Margaret', 'Margarete', 'Margaretha', 'Margarett', 'Margaretta',
'Margarette', 'Margarita', 'Margarite', 'Marge', 'Margene', 'Margeret',
'Margery', 'Marget', 'Margie', 'Margo', 'Margot', 'Margret',
'Margrett', 'Margretta', 'Marguerite', 'Margueritte', 'Margurite',
'Margy', 'Mari', 'Maria', 'Mariah', 'Mariam', 'Marian', 'Mariana',
'Marianita', 'Mariann', 'Marianna', 'Marianne', 'Maribel', 'Maribeth',
'Maricela', 'Marie', 'Mariel', 'Mariela', 'Marietta', 'Marilee',
'Marilla', 'Marilou', 'Marilyn', 'Marilynn', 'Marin', 'Marina',
'Marinda', 'Marion', 'Marisa', 'Marisela', 'Marisol', 'Marissa',
'Marita', 'Maritza', 'Mariyah', 'Marjorie', 'Marjory', 'Markita',
'Marla', 'Marlana', 'Marlee', 'Marleen', 'Marleigh', 'Marlen',
'Marlena', 'Marlene', 'Marley', 'Marlie', 'Marlo', 'Marlyn', 'Marlys',
'Marni', 'Marnie', 'Marnita', 'Marolyn', 'Marquita', 'Marry', 'Marsha',
'Marta', 'Martha', 'Marti', 'Martika', 'Martina', 'Martine', 'Marty',
'Marva', 'Marvel', 'Mary', 'Maryam', 'Maryann', 'Maryanne',
'Marybelle', 'Marybeth', 'Maryellen', 'Maryjane', 'Maryjo', 'Marylee',
'Marylin', 'Marylou', 'Marylouise', 'Marylyn', 'Masako', 'Mathilda',
'Mathilde', 'Matie', 'Matilda', 'Matilde', 'Mattie', 'Mattye', 'Maud',
'Maude', 'Maudie', 'Maura', 'Maureen', 'Maurine', 'Mavis', 'Maxie',
'Maxine', 'May', 'Maya', 'Maybell', 'Maybelle', 'Maye', 'Mayme',
'Maymie', 'Mayra', 'Mazie', 'Mckayla', 'Mckenna', 'Mckenzie',
'Mckinley', 'Meadow', 'Meagan', 'Meaghan', 'Mechelle', 'Meda', 'Media',
'Medora', 'Meg', 'Megan', 'Meggan', 'Meghan', 'Meghann', 'Melanie',
'Melany', 'Melba', 'Melina', 'Melinda', 'Melisa', 'Melissa',
'Melissia', 'Mell', 'Mellie', 'Mellisa', 'Mellissa', 'Melodee',
'Melodie', 'Melody', 'Melonie', 'Melony', 'Melva', 'Melvina', 'Mena',
'Mendy', 'Mercedes', 'Mercy', 'Meredith', 'Merilyn', 'Merle',
'Merlene', 'Merna', 'Merri', 'Merrie', 'Merrilee', 'Merrily', 'Merry',
'Mertie', 'Meryl', 'Meta', 'Metha', 'Metta', 'Mettie', 'Mia', 'Miah',
'Micaela', 'Micah', 'Micayla', 'Michaela', 'Michaele', 'Michal',
'Michele', 'Michelina', 'Michell', 'Michelle', 'Mickey', 'Mickie',
'Miesha', 'Migdalia', 'Mignon', 'Mikaela', 'Mikaila', 'Mikala',
'Mikalah', 'Mikayla', 'Mila', 'Milagros', 'Milan', 'Milda', 'Mildred',
'Miley', 'Milissa', 'Millicent', 'Millie', 'Milly', 'Mima', 'Mimi',
'Mina', 'Minda', 'Mindi', 'Mindy', 'Minerva', 'Minervia', 'Minna',
'Minnie', 'Minta', 'Mintie', 'Mira', 'Miracle', 'Miranda', 'Mireya',
'Miriah', 'Miriam', 'Mirna', 'Mirtie', 'Missie', 'Missouri', 'Missy',
'Misti', 'Mistie', 'Misty', 'Mittie', 'Mitzi', 'Miya', 'Modena',
'Moesha', 'Moira', 'Mollie', 'Molly', 'Mona', 'Monica', 'Monika',
'Monique', 'Monna', 'Monnie', 'Monserrat', 'Montana', 'Montie', 'Mora',
'Morgan', 'Moriah', 'Mossie', 'Mozell', 'Mozella', 'Mozelle', 'Muriel',
'Murl', 'Mya', 'Myah', 'Myla', 'Mylee', 'Mylie', 'Myra', 'Myranda',
'Myrl', 'Myrle', 'Myrna', 'Myrta', 'Myrtice', 'Myrtie', 'Myrtis',
'Myrtle', 'Nada', 'Nadia', 'Nadine', 'Naima', 'Nakia', 'Nakisha',
'Nakita', 'Nallely', 'Nan', 'Nana', 'Nanci', 'Nancie', 'Nancy',
'Nanette', 'Nanie', 'Nanna', 'Nannette', 'Nannie', 'Naoma', 'Naomi',
'Narcissus', 'Natalee', 'Natalia', 'Natalie', 'Nataly', 'Natalya',
'Natasha', 'Nathalia', 'Nathalie', 'Nathaly', 'Natosha', 'Nautica',
'Nayeli', 'Nayely', 'Nealie', 'Nealy', 'Nedra', 'Neha', 'Nelda',
'Nelia', 'Nelie', 'Nell', 'Nella', 'Nelle', 'Nellie', 'Nelly', 'Nena',
'Neola', 'Neoma', 'Neppie', 'Nereida', 'Neta', 'Netta', 'Nettie',
'Neva', 'Nevada', 'Nevaeh', 'Neveah', 'Nia', 'Nichelle', 'Nichol',
'Nichole', 'Nicki', 'Nicola', 'Nicole', 'Nicolette', 'Nicolle', 'Niki',
'Nikia', 'Nikita', 'Nikki', 'Nikole', 'Nila', 'Nilda', 'Nina',
'Ninnie', 'Nira', 'Nita', 'Nobie', 'Noel', 'Noelia', 'Noelle', 'Noemi',
'Noemie', 'Nohely', 'Nola', 'Nolia', 'Nolie', 'Noma', 'Nona', 'Nonie',
'Nora', 'Norah', 'Noreen', 'Norene', 'Noreta', 'Noretta', 'Norine',
'Norita', 'Norma', 'Nova', 'Novella', 'Nya', 'Nyah', 'Nyasia', 'Nyla',
'Nylah', 'Nyree', 'Ocie', 'Octa', 'Octavia', 'Octavie', 'Oda',
'Odalis', 'Odalys', 'Odelia', 'Odell', 'Odessa', 'Odette', 'Odie',
'Odile', 'Ofelia', 'Ola', 'Olar', 'Olena', 'Olene', 'Oleta', 'Olevia',
'Olga', 'Olie', 'Olinda', 'Oline', 'Oliva', 'Olive', 'Olivia',
'Olivine', 'Ollie', 'Olympia', 'Oma', 'Omie', 'Ona', 'Oneida', 'Oneta',
'Oney', 'Onie', 'Onnie', 'Opal', 'Opha', 'Ophelia', 'Ora', 'Orah',
'Oral', 'Oralia', 'Orelia', 'Orene', 'Orilla', 'Orlena', 'Orma',
'Orpha', 'Orra', 'Orrie', 'Osa', 'Osie', 'Ossie', 'Ota', 'Otelia',
'Otha', 'Ottie', 'Ottilia', 'Ottilie', 'Ouida', 'Ova', 'Ozell',
'Ozella', 'Ozie', 'Paige', 'Pairlee', 'Paisley', 'Paityn', 'Pallie',
'Palma', 'Paloma', 'Pam', 'Pamala', 'Pamela', 'Pamelia', 'Pamella',
'Pandora', 'Pansy', 'Paola', 'Paralee', 'Paris', 'Parker', 'Parlee',
'Parthenia', 'Pat', 'Patience', 'Patrica', 'Patrice', 'Patricia',
'Patsy', 'Patti', 'Pattie', 'Patty', 'Paula', 'Pauletta', 'Paulette',
'Paulina', 'Pauline', 'Payten', 'Payton', 'Pearl', 'Pearla', 'Pearle',
'Pearlene', 'Pearlie', 'Pearline', 'Pearly', 'Peggie', 'Peggy',
'Penelope', 'Penni', 'Pennie', 'Penny', 'Pepper', 'Perla', 'Permelia',
'Perri', 'Petra', 'Peyton', 'Phebe', 'Pheobe', 'Phillis', 'Philomena',
'Philomene', 'Phoebe', 'Phoenix', 'Phylicia', 'Phylis', 'Phyliss',
'Phyllis', 'Pink', 'Pinkey', 'Pinkie', 'Piper', 'Pluma', 'Pollie',
'Polly', 'Porsche', 'Porsha', 'Portia', 'Precious', 'Presley',
'Pricilla', 'Princess', 'Priscila', 'Priscilla', 'Prudence', 'Prudie',
'Qiana', 'Queen', 'Queenie', 'Quiana', 'Quinn', 'Rachael', 'Racheal',
'Rachel', 'Rachelle', 'Racquel', 'Rae', 'Raegan', 'Raelyn', 'Raelynn',
'Rafaela', 'Ragna', 'Raina', 'Ramona', 'Randi', 'Raquel', 'Rashida',
'Raven', 'Rayna', 'Rayne', 'Reagan', 'Reanna', 'Reatha', 'Reba',
'Rebeca', 'Rebecca', 'Rebekah', 'Reece', 'Reese', 'Regan', 'Regena',
'Regenia', 'Regina', 'Reilly', 'Reina', 'Rella', 'Rena', 'Renada',
'Renae', 'Renata', 'Rene', 'Renea', 'Renee', 'Renita', 'Rennie',
'Ressie', 'Reta', 'Retha', 'Retta', 'Rettie', 'Reva', 'Reyna', 'Rhea',
'Rheta', 'Rhianna', 'Rhiannon', 'Rhoda', 'Rhona', 'Rhonda', 'Rianna',
'Richelle', 'Ricki', 'Rihanna', 'Rikki', 'Riley', 'Rilla', 'Rillie',
'Rinda', 'Risa', 'Rita', 'River', 'Riya', 'Robbie', 'Robbin',
'Roberta', 'Robin', 'Robyn', 'Rochelle', 'Rocio', 'Roena', 'Rolanda',
'Roma', 'Romaine', 'Romona', 'Rona', 'Ronda', 'Roni', 'Ronna',
'Ronnie', 'Rory', 'Rosa', 'Rosabelle', 'Rosalee', 'Rosalia', 'Rosalie',
'Rosalind', 'Rosalinda', 'Rosaline', 'Rosalyn', 'Rosamond', 'Rosann',
'Rosanna', 'Rosanne', 'Rosaria', 'Rosario', 'Rose', 'Roseann',
'Roseanna', 'Roseanne', 'Rosella', 'Roselyn', 'Rosemarie', 'Rosemary',
'Rosena', 'Rosetta', 'Rosey', 'Rosia', 'Rosie', 'Rosina', 'Rosita',
'Roslyn', 'Rossie', 'Rosy', 'Rowan', 'Rowena', 'Roxana', 'Roxane',
'Roxann', 'Roxanna', 'Roxanne', 'Roxie', 'Roxy', 'Rozanne', 'Rozella',
'Rubi', 'Rubie', 'Ruby', 'Rubye', 'Ruie', 'Ruth', 'Rutha', 'Ruthann',
'Ruthanne', 'Ruthe', 'Ruthie', 'Ryann', 'Rylan', 'Rylee', 'Ryleigh',
'Rylie', 'Sabina', 'Sable', 'Sabra', 'Sabrina', 'Sada', 'Sade',
'Sadie', 'Sadye', 'Sage', 'Saige', 'Salena', 'Salina', 'Sallie',
'Sally', 'Salma', 'Salome', 'Samantha', 'Samara', 'Samatha', 'Samira',
'Samiyah', 'Sammie', 'Sanaa', 'Sanai', 'Sandi', 'Sandie', 'Sandra',
'Sandy', 'Saniya', 'Saniyah', 'Sanjuana', 'Sanjuanita', 'Sannie',
'Santa', 'Santana', 'Santina', 'Santos', 'Sara', 'Sarah', 'Sarahi',
'Sarai', 'Sariah', 'Sarina', 'Sarita', 'Sarrah', 'Sasha', 'Saundra',
'Savana', 'Savanah', 'Savanna', 'Savannah', 'Savilla', 'Scarlet',
'Scarlett', 'Sebrina', 'Selah', 'Selena', 'Selene', 'Selina', 'Selma',
'Sena', 'Senora', 'Serena', 'Serenity', 'Serina', 'Shae', 'Shaina',
'Shakira', 'Shalon', 'Shalonda', 'Shameka', 'Shamika', 'Shana',
'Shanae', 'Shanda', 'Shandra', 'Shane', 'Shaneka', 'Shanell',
'Shanelle', 'Shanequa', 'Shani', 'Shania', 'Shanice', 'Shaniece',
'Shanika', 'Shaniqua', 'Shanita', 'Shaniya', 'Shanna', 'Shannan',
'Shannen', 'Shannon', 'Shanon', 'Shanta', 'Shante', 'Shantel',
'Shantell', 'Shaquana', 'Shaquita', 'Shara', 'Shardae', 'Sharday',
'Sharde', 'Sharee', 'Sharen', 'Shari', 'Sharita', 'Sharla', 'Sharleen',
'Sharlene', 'Sharman', 'Sharon', 'Sharonda', 'Sharron', 'Sharyl',
'Sharyn', 'Shasta', 'Shatara', 'Shauna', 'Shaunna', 'Shavon',
'Shavonne', 'Shawanda', 'Shawna', 'Shawnda', 'Shawnee', 'Shawnna',
'Shawnte', 'Shay', 'Shayla', 'Shaylee', 'Shayna', 'Shea', 'Sheena',
'Sheila', 'Sheilah', 'Shelba', 'Shelbi', 'Shelbie', 'Shelby', 'Shelia',
'Shelley', 'Shelli', 'Shellie', 'Shelly', 'Shelva', 'Shelvia',
'Shelvie', 'Shena', 'Shenna', 'Sheree', 'Sheri', 'Sheridan', 'Sherie',
'Sherilyn', 'Sherita', 'Sherlyn', 'Sheron', 'Sherree', 'Sherri',
'Sherrie', 'Sherrill', 'Sherron', 'Sherry', 'Sherryl', 'Sheryl',
'Sheryll', 'Sheyla', 'Shianne', 'Shiela', 'Shiloh', 'Shira', 'Shirl',
'Shirlee', 'Shirleen', 'Shirlene', 'Shirley', 'Shirleyann', 'Shirlie',
'Shona', 'Shonda', 'Shonna', 'Shreya', 'Shyann', 'Shyanne', 'Shyla',
'Sibbie', 'Sibyl', 'Siddie', 'Sidney', 'Siena', 'Sienna', 'Sierra',
'Signa', 'Signe', 'Sigrid', 'Silvia', 'Simona', 'Simone', 'Sina',
'Sinda', 'Siobhan', 'Sister', 'Sky', 'Skye', 'Skyla', 'Skylar',
'Skyler', 'Sloane', 'Socorro', 'Sofia', 'Soledad', 'Somer', 'Sommer',
'Sondra', 'Sonia', 'Sonja', 'Sonji', 'Sonya', 'Sophia', 'Sophie',
'Sophronia', 'Spring', 'Stacey', 'Staci', 'Stacia', 'Stacie', 'Stacy',
'Star', 'Starla', 'Starr', 'Stasia', 'Stefani', 'Stefanie', 'Stella',
'Stephaine', 'Stephani', 'Stephania', 'Stephanie', 'Stephany',
'Stephenie', 'Stevie', 'Stormy', 'Sudie', 'Sue', 'Suellen', 'Sula',
'Summer', 'Sunday', 'Sunny', 'Sunshine', 'Susan', 'Susana', 'Susann',
'Susanna', 'Susannah', 'Susanne', 'Susie', 'Sussie', 'Suzan', 'Suzann',
'Suzanna', 'Suzanne', 'Suzette', 'Suzie', 'Suzy', 'Sybil', 'Sybilla',
'Syble', 'Sydell', 'Sydnee', 'Sydney', 'Sydni', 'Sydnie', 'Sylva',
'Sylvania', 'Sylvia', 'Symone', 'Syreeta', 'Tabatha', 'Tabetha',
'Tabitha', 'Tai', 'Taina', 'Taja', 'Takisha', 'Talia', 'Taliyah',
'Tamala', 'Tamara', 'Tamatha', 'Tambra', 'Tameka', 'Tamekia', 'Tamela',
'Tamera', 'Tami', 'Tamia', 'Tamica', 'Tamie', 'Tamika', 'Tamiko',
'Tamisha', 'Tammi', 'Tammie', 'Tammy', 'Tamra', 'Tamya', 'Tana',
'Tanesha', 'Tangela', 'Tania', 'Tanika', 'Tanisha', 'Taniya',
'Taniyah', 'Tanja', 'Tanya', 'Tara', 'Tarah', 'Taraji', 'Tari',
'Tarsha', 'Taryn', 'Tasha', 'Tashina', 'Tasia', 'Tatia', 'Tatiana',
'Tatianna', 'Tatum', 'Tatyana', 'Tatyanna', 'Tawana', 'Tawanda',
'Tawanna', 'Tawny', 'Tawnya', 'Taya', 'Tayla', 'Tayler', 'Taylor',
'Tea', 'Teagan', 'Teela', 'Teena', 'Tella', 'Tempie', 'Tena', 'Tenika',
'Tenisha', 'Tennessee', 'Tennie', 'Tennille', 'Tera', 'Teresa',
'Terese', 'Teressa', 'Teri', 'Terra', 'Terri', 'Terrie', 'Terry',
'Tess', 'Tessa', 'Tessie', 'Texanna', 'Texas', 'Texie', 'Thalia',
'Thea', 'Theda', 'Thekla', 'Thelma', 'Theodocia', 'Theodora',
'Theodosia', 'Theola', 'Theresa', 'Therese', 'Theresia', 'Theta',
'Thomasina', 'Thora', 'Thresa', 'Thursa', 'Thyra', 'Tia', 'Tiana',
'Tianna', 'Tiara', 'Tiarra', 'Tiera', 'Tierra', 'Tiesha', 'Tiffani',
'Tiffanie', 'Tiffany', 'Tilda', 'Tilla', 'Tillie', 'Tina', 'Tiney',
'Tinie', 'Tinnie', 'Tiny', 'Tisa', 'Tisha', 'Tishie', 'Tobi', 'Toby',
'Toccara', 'Tomasa', 'Tomeka', 'Tomika', 'Tommie', 'Tonda', 'Toni',
'Tonia', 'Tonja', 'Tonya', 'Tori', 'Torie', 'Torrie', 'Tory', 'Tosha',
'Toshiko', 'Towanda', 'Toya', 'Tracee', 'Tracey', 'Traci', 'Tracie',
'Tracy', 'Treasure', 'Treena', 'Trena', 'Tresa', 'Tressa', 'Tressie',
'Treva', 'Tricia', 'Trilby', 'Trina', 'Trinidad', 'Trinity', 'Trish',
'Trisha', 'Trista', 'Tristan', 'Tristen', 'Trudi', 'Trudie', 'Trudy',
'Trula', 'Tula', 'Twila', 'Twyla', 'Tyesha', 'Tyra', 'Ula', 'Una',
'Unique', 'Unknown', 'Ura', 'Ursula', 'Vada', 'Val', 'Valarie',
'Valencia', 'Valentina', 'Valentine', 'Valeria', 'Valerie', 'Valery',
'Valinda', 'Vallie', 'Valorie', 'Vanesa', 'Vanessa', 'Vannie', 'Vara',
'Vashti', 'Vassie', 'Veda', 'Vela', 'Velda', 'Velia', 'Vella', 'Velma',
'Velva', 'Velvet', 'Vena', 'Venessa', 'Venice', 'Venie', 'Venita',
'Vennie', 'Venus', 'Veola', 'Vera', 'Verda', 'Verdell', 'Verdie',
'Verena', 'Vergie', 'Verla', 'Verlene', 'Verlie', 'Verna', 'Verne',
'Vernell', 'Vernelle', 'Vernetta', 'Vernia', 'Vernice', 'Vernie',
'Vernita', 'Verona', 'Veronica', 'Versa', 'Versie', 'Vertie', 'Vessie',
'Vesta', 'Veta', 'Veva', 'Vicie', 'Vickey', 'Vicki', 'Vickie', 'Vicky',
'Victoria', 'Victorine', 'Victory', 'Vicy', 'Vida', 'Vikki', 'Villa',
'Vilma', 'Vina', 'Vincenza', 'Viney', 'Vinie', 'Vinnie', 'Viola',
'Violet', 'Violeta', 'Violetta', 'Violette', 'Vira', 'Virdie',
'Virgia', 'Virgie', 'Virginia', 'Viridiana', 'Vita', 'Viva', 'Vivian',
'Viviana', 'Vivien', 'Vivienne', 'Vlasta', 'Vonda', 'Vonetta',
'Vonnie', 'Wanda', 'Waneta', 'Wanita', 'Wava', 'Wende', 'Wendi',
'Wendy', 'Whitley', 'Whitney', 'Wilda', 'Wilhelmina', 'Wilhelmine',
'Willa', 'Willene', 'Willia', 'Willie', 'Williemae', 'Willodean',
'Willow', 'Wilma', 'Windy', 'Winifred', 'Winnie', 'Winnifred',
'Winona', 'Winter', 'Wynona', 'Xena', 'Ximena', 'Xiomara', 'Yadira',
'Yahaira', 'Yajaira', 'Yamilet', 'Yamilex', 'Yareli', 'Yaretzi',
'Yaritza', 'Yasmeen', 'Yasmin', 'Yasmine', 'Yazmin', 'Yesenia',
'Yessenia', 'Yetta', 'Yolanda', 'Yolonda', 'Yoselin', 'Yoshiko',
'Yuliana', 'Yulisa', 'Yulissa', 'Yuridia', 'Yvette', 'Yvonne', 'Zada',
'Zadie', 'Zaida', 'Zana', 'Zandra', 'Zaniyah', 'Zara', 'Zaria',
'Zariah', 'Zela', 'Zelda', 'Zelia', 'Zella', 'Zelma', 'Zelpha', 'Zena',
'Zenobia', 'Zeta', 'Zetta', 'Zettie', 'Zhane', 'Zillah', 'Zilpah',
'Zilpha', 'Zina', 'Zion', 'Zita', 'Zoa', 'Zoe', 'Zoey', 'Zoie', 'Zola',
'Zona', 'Zora', 'Zula'
)
first_names_male = ('Aaden', 'Aarav', 'Aaron', 'Ab', 'Abb', 'Abbott',
'Abdiel', 'Abdul', 'Abdullah', 'Abe', 'Abel', 'Abelardo', 'Abie',
'Abner', 'Abraham', 'Abram', 'Ace', 'Acey', 'Acie', 'Acy', 'Adalberto',
'Adam', 'Adams', 'Adan', 'Add', 'Adelard', 'Adelbert', 'Aden', 'Adin',
'Aditya', 'Adlai', 'Admiral', 'Adolf', 'Adolfo', 'Adolph', 'Adolphus',
'Adonis', 'Adrain', 'Adrian', 'Adriel', 'Adrien', 'Adron', 'Aedan',
'Agustin', 'Agustus', 'Ah', 'Ahmad', 'Ahmed', 'Aidan', 'Aiden',
'Aidyn', 'Aime', 'Akeem', 'Al', 'Alan', 'Alanzo', 'Albert', 'Alberto',
'Albertus', 'Albin', 'Albion', 'Alby', 'Alcee', 'Alcide', 'Alden',
'Aldo', 'Alec', 'Aleck', 'Alejandro', 'Alek', 'Alessandro', 'Alex',
'Alexande', 'Alexander', 'Alexandre', 'Alexandro', 'Alexis',
'Alexzander', 'Alf', 'Alferd', 'Alfie', 'Alfonse', 'Alfonso',
'Alfonzo', 'Alford', 'Alfred', 'Alfredo', 'Alger', 'Algernon', 'Algie',
'Algot', 'Ali', 'Alijah', 'Allan', 'Allen', 'Allyn', 'Almer', 'Almon',
'Almond', 'Almus', 'Alois', 'Alonso', 'Alonza', 'Alonzo', 'Aloys',
'Aloysius', 'Alpheus', 'Alphons', 'Alphonse', 'Alphonso', 'Alphonsus',
'Alston', 'Alto', 'Alton', 'Alva', 'Alvah', 'Alvan', 'Alvaro', 'Alver',
'Alvia', 'Alvie', 'Alvin', 'Alvis', 'Alvy', 'Alwin', 'Amado', 'Amare',
'Amari', 'Amarion', 'Amasa', 'Ambers', 'Ambrose', 'Americo', 'Amerigo',
'Amil', 'Amin', 'Amir', 'Amit', 'Ammon', 'Amon', 'Amos', 'Ananias',
'Anastacio', 'Anatole', 'Ancel', 'Ancil', 'Anders', 'Anderson',
'Andon', 'Andra', 'Andrae', 'Andre', 'Andreas', 'Andres', 'Andrew',
'Andy', 'Anfernee', 'Angel', 'Angelo', 'Angus', 'Anibal', 'Ansel',
'Anson', 'Anthoney', 'Anthony', 'Antione', 'Antoine', 'Anton',
'Antone', 'Antonio', 'Antony', 'Antwain', 'Antwan', 'Antwon', 'Anwar',
'Arba', 'Arbie', 'Arch', 'Archer', 'Archibald', 'Archie', 'Ardell',
'Arden', 'Ari', 'Aric', 'Arjun', 'Arlan', 'Arland', 'Arlen', 'Arley',
'Arlie', 'Arlin', 'Arlington', 'Arlis', 'Arlo', 'Arlyn', 'Arman',
'Armand', 'Armando', 'Armani', 'Armin', 'Armond', 'Armstead', 'Arnav',
'Arne', 'Arnett', 'Arnie', 'Arno', 'Arnold', 'Arnoldo', 'Arnulfo',
'Aron', 'Arron', 'Arsenio', 'Art', 'Arther', 'Arthor', 'Arthur',
'Artie', 'Artis', 'Arturo', 'Arvel', 'Arvid', 'Arvil', 'Arvin', 'Arvo',
'Aryan', 'Asa', 'Asberry', 'Asbury', 'Ashby', 'Asher', 'Ashton',
'Atha', 'Atlas', 'Atticus', 'Attilio', 'Aubra', 'Aubrey', 'Audie',
'Audley', 'Audy', 'August', 'Auguste', 'Augustin', 'Augustine',
'Augustus', 'Aurelio', 'Aurthur', 'Austen', 'Austin', 'Auston',
'Austyn', 'Auther', 'Author', 'Authur', 'Autry', 'Avery', 'Avon',
'Axel', 'Ayaan', 'Aydan', 'Ayden', 'Aydin', 'Babe', 'Babyboy',
'Bailey', 'Baker', 'Baldwin', 'Ballard', 'Banks', 'Barnard', 'Barnett',
'Barney', 'Barnie', 'Baron', 'Barrett', 'Barrie', 'Barron', 'Barry',
'Bart', 'Bartholomew', 'Bartley', 'Barton', 'Bascom', 'Basil',
'Baxter', 'Bayard', 'Beau', 'Beckett', 'Beckham', 'Bedford', 'Beecher',
'Bell', 'Belton', 'Ben', 'Benard', 'Benedict', 'Benito', 'Benjaman',
'Benjamen', 'Benjamin', 'Benjamine', 'Benji', 'Benjiman', 'Benjman',
'Bennett', 'Bennie', 'Benny', 'Benson', 'Bentley', 'Benton', 'Berkley',
'Berlin', 'Bernard', 'Bernardo', 'Bernhard', 'Bernie', 'Berry', 'Bert',
'Bertie', 'Berton', 'Bertram', 'Bertrand', 'Beryl', 'Bethel', 'Bilal',
'Bill', 'Billie', 'Billy', 'Bird', 'Birt', 'Bishop', 'Bjorn', 'Blain',
'Blaine', 'Blair', 'Blaise', 'Blake', 'Blanchard', 'Blane', 'Blas',
'Blaze', 'Bliss', 'Bluford', 'Bo', 'Bob', 'Bobbie', 'Bobby', 'Bode',
'Bolden', 'Booker', 'Boone', 'Boris', 'Bose', 'Boss', 'Boston',
'Bowman', 'Boyce', 'Boyd', 'Boysie', 'Brad', 'Braden', 'Bradford',
'Bradley', 'Bradly', 'Brady', 'Bradyn', 'Braeden', 'Braedon',
'Braiden', 'Brain', 'Branch', 'Brandan', 'Branden', 'Brandin',
'Brandon', 'Brandt', 'Brandy', 'Brandyn', 'Brannon', 'Branson',
'Brant', 'Brantley', 'Braulio', 'Braxton', 'Brayan', 'Brayden',
'Braydon', 'Braylen', 'Braylon', 'Brendan', 'Brenden', 'Brendon',
'Brennan', 'Brennen', 'Brennon', 'Brent', 'Brenton', 'Bret', 'Brett',
'Brian', 'Brice', 'Bridger', 'Brien', 'Brion', 'Britt', 'Brittany',
'Britton', 'Brock', 'Broderick', 'Brodie', 'Brody', 'Brogan',
'Bronson', 'Brook', 'Brooks', 'Brown', 'Bruce', 'Bruno', 'Bryan',
'Bryant', 'Bryce', 'Brycen', 'Bryon', 'Bryson', 'Bryton', 'Buck',
'Bud', 'Budd', 'Buddie', 'Buddy', 'Buel', 'Buell', 'Buford', 'Bunk',
'Burdette', 'Buren', 'Burgess', 'Burk', 'Burke', 'Burl', 'Burleigh',
'Burley', 'Burnell', 'Burnett', 'Burney', 'Burnice', 'Burnie', 'Burns',
'Burr', 'Burrel', 'Burrell', 'Burt', 'Burton', 'Bush', 'Buster',
'Butch', 'Butler', 'Bynum', 'Byrd', 'Byron', 'Cade', 'Caden', 'Cael',
'Caesar', 'Caiden', 'Cain', 'Cal', 'Cale', 'Caleb', 'Calhoun',
'Callie', 'Callum', 'Calvin', 'Cam', 'Camden', 'Cameron', 'Camilo',
'Campbell', 'Camren', 'Camron', 'Camryn', 'Candido', 'Cannon',
'Canyon', 'Cap', 'Captain', 'Carey', 'Carl', 'Carleton', 'Carlie',
'Carlisle', 'Carlo', 'Carlos', 'Carlton', 'Carlyle', 'Carmel',
'Carmelo', 'Carmen', 'Carmine', 'Carnell', 'Carrie', 'Carrol',
'Carroll', 'Carsen', 'Carson', 'Carter', 'Cary', 'Cas', 'Case',
'Casen', 'Casey', 'Cash', 'Casimer', 'Casimir', 'Casimiro', 'Cason',
'Casper', 'Cass', 'Cassidy', 'Cassie', 'Cassius', 'Caswell', 'Cato',
'Cayden', 'Ceasar', 'Cecil', 'Cedric', 'Cedrick', 'Celestino',
'Cephus', 'Cesar', 'Ceylon', 'Chace', 'Chad', 'Chadd', 'Chadrick',
'Chadwick', 'Chaim', 'Chalmer', 'Chalmers', 'Champ', 'Chance',
'Chancey', 'Chancy', 'Chandler', 'Channing', 'Charle', 'Charles',
'Charley', 'Charlie', 'Charls', 'Charlton', 'Charly', 'Chas', 'Chase',
'Chauncey', 'Chauncy', 'Chaz', 'Che', 'Chesley', 'Chester', 'Chet',
'Cheyenne', 'Chin', 'Chip', 'Chris', 'Christ', 'Christian',
'Christina', 'Christion', 'Christop', 'Christoper', 'Christophe',
'Christopher', 'Chuck', 'Cicero', 'Clabe', 'Claiborne', 'Clair',
'Clarance', 'Clare', 'Clarence', 'Clark', 'Clarke', 'Clarnce', 'Claud',
'Claude', 'Claudie', 'Claudio', 'Claudius', 'Claus', 'Clay', 'Clayton',
'Clearence', 'Cleave', 'Clell', 'Clem', 'Clemence', 'Clemens',
'Clement', 'Clemente', 'Clemmie', 'Clemon', 'Cleo', 'Cleon', 'Cletus',
'Cleve', 'Cleveland', 'Clide', 'Cliff', 'Clifford', 'Clifton', 'Clint',
'Clinton', 'Clive', 'Clovis', 'Cloyd', 'Clyde', 'Coby', 'Codey',
'Codi', 'Codie', 'Cody', 'Coen', 'Cohen', 'Colbert', 'Colby', 'Cole',
'Coleman', 'Coleton', 'Coley', 'Colie', 'Colin', 'Collie', 'Collier',
'Collin', 'Collins', 'Collis', 'Colon', 'Colonel', 'Colt', 'Colten',
'Colter', 'Colton', 'Columbus', 'Colvin', 'Commodore', 'Con', 'Conard',
'Conley', 'Conner', 'Connie', 'Connor', 'Conor', 'Conrad',
'Constantine', 'Conway', 'Coolidge', 'Cooper', 'Corbett', 'Corbin',
'Cordaro', 'Cordell', 'Cordero', 'Corey', 'Cornel', 'Cornelious',
'Cornelius', 'Cornell', 'Corry', 'Cortez', 'Cortney', 'Corwin', 'Cory',
'Cosmo', 'Coty', 'Council', 'Courtland', 'Courtney', 'Coy', 'Craig',
'Crawford', 'Creed', 'Cris', 'Cristian', 'Cristobal', 'Cristofer',
'Cristopher', 'Crockett', 'Cruz', 'Cullen', 'Curley', 'Curt', 'Curtis',
'Curtiss', 'Cyril', 'Cyrus', 'Dabney', 'Dakoda', 'Dakota', 'Dakotah',
'Dale', 'Dallas', 'Dallin', 'Dalton', 'Dalvin', 'Damarcus', 'Damari',
'Damarion', 'Dameon', 'Damian', 'Damien', 'Damion', 'Damon', 'Damond',
'Dan', 'Dana', 'Dandre', 'Dane', 'Dangelo', 'Danial', 'Daniel', 'Dann',
'Dannie', 'Danniel', 'Danny', 'Dante', 'Daquan', 'Darby', 'Darcy',
'Darell', 'Daren', 'Darian', 'Darien', 'Darin', 'Dario', 'Darion',
'Darius', 'Darl', 'Darnell', 'Darold', 'Daron', 'Darrel', 'Darrell',
'Darren', 'Darrian', 'Darrick', 'Darrien', 'Darrin', 'Darrion',
'Darrius', 'Darron', 'Darry', 'Darryl', 'Darryle', 'Darryll', 'Darryn',
'Darvin', 'Darwin', 'Darwyn', 'Daryl', 'Daryle', 'Daryn', 'Dashawn',
'Daulton', 'Daunte', 'Davante', 'Dave', 'Davey', 'Davian', 'David',
'Davie', 'Davin', 'Davion', 'Davis', 'Davon', 'Davonta', 'Davonte',
'Davy', 'Dawson', 'Dax', 'Daxton', 'Dayne', 'Dayton', 'Deacon', 'Dean',
'Deandre', 'Deane', 'Deangelo', 'Deante', 'Declan', 'Dedric',
'Dedrick', 'Deegan', 'Deforest', 'Deion', 'Dejon', 'Dejuan', 'Del',
'Delano', 'Delbert', 'Dell', 'Della', 'Delma', 'Delmar', 'Delmas',
'Delmer', 'Delmus', 'Delos', 'Delphin', 'Delton', 'Delvin', 'Delwin',
'Demarco', 'Demarcus', 'Demario', 'Demarion', 'Demetri', 'Demetric',
'Demetrios', 'Demetrius', 'Demian', 'Demond', 'Demonte', 'Dempsey',
'Denis', 'Dennie', 'Dennis', 'Denny', 'Denton', 'Denver', 'Denzel',
'Denzell', 'Denzil', 'Deon', 'Deondre', 'Deonta', 'Deontae', 'Deonte',
'Dequan', 'Derald', 'Dereck', 'Derek', 'Dereon', 'Deric', 'Derick',
'Derik', 'Derl', 'Deron', 'Derrek', 'Derrell', 'Derrick', 'Derwin',
'Deryl', 'Desean', 'Deshaun', 'Deshawn', 'Desi', 'Desmond', 'Dessie',
'Destin', 'Destry', 'Devan', 'Devante', 'Devaughn', 'Deven', 'Devin',
'Devon', 'Devonta', 'Devontae', 'Devonte', 'Devyn', 'Deward',
'Dewayne', 'Dewey', 'Dewitt', 'Dexter', 'Diallo', 'Diamond', 'Diane',
'Dickie', 'Diego', 'Dijon', 'Dilan', 'Dillan', 'Dillard',
'Dillion', 'Dillon', 'Dimitri', 'Dimitrios', 'Dink', 'Dino', 'Dion',
'Dionicio', 'Dionte', 'Dirk', 'Dixon', 'Doc', 'Dock', 'Doctor', 'Doll',
'Dolph', 'Dolphus', 'Domenic', 'Domenick', 'Domenico', 'Domingo',
'Dominic', 'Dominick', 'Dominik', 'Don', 'Donaciano', 'Donal',
'Donald', 'Donat', 'Donato', 'Donavan', 'Donavon', 'Dondre', 'Donell',
'Donn', 'Donnell', 'Donnie', 'Donny', 'Donovan', 'Donta', 'Dontae',
'Donte', 'Dora', 'Dorian', 'Dorman', 'Dorr', 'Dorris', 'Dorsey',
'Doss', 'Doug', 'Douglas', 'Douglass', 'Dow', 'Doyle', 'Dozier',
'Drake', 'Draven', 'Drew', 'Drury', 'Duane', 'Duard', 'Dudley', 'Duff',
'Duke', 'Duncan', 'Durell', 'Durrell', 'Durward', 'Durwood', 'Dustan',
'Dustin', 'Dusty', 'Dustyn', 'Duwayne', 'Dwain', 'Dwaine', 'Dwane',
'Dwayne', 'Dwight', 'Dwyane', 'Dylan', 'Dyllan', 'Dylon', 'Ean',
'Earl', 'Earle', 'Earley', 'Earlie', 'Early', 'Earnest', 'Easton',
'Ebb', 'Ebbie', 'Eben', 'Ebenezer', 'Eber', 'Ebert', 'Ed', 'Edd',
'Eddie', 'Eddy', 'Eden', 'Edgar', 'Edgardo', 'Edie', 'Edison', 'Edmon',
'Edmond', 'Edmund', 'Edsel', 'Edson', 'Eduardo', 'Edw', 'Edward',
'Edwardo', 'Edwin', 'Effie', 'Efrain', 'Efrem', 'Efren', 'Egbert',
'Einar', 'Eino', 'Elam', 'Elbert', 'Elbridge', 'Elby', 'Elden',
'Elder', 'Eldon', 'Eldred', 'Eldridge', 'Elex', 'Elgie', 'Elgin',
'Eli', 'Elian', 'Elias', 'Elick', 'Elie', 'Eliezer', 'Eliga', 'Eligah',
'Elige', 'Elihu', 'Elijah', 'Eliot', 'Eliseo', 'Elisha', 'Elizah',
'Ell', 'Ellery', 'Elliot', 'Elliott', 'Ellis', 'Ellison', 'Ellsworth',
'Ellwood', 'Elmer', 'Elmo', 'Elmore', 'Elon', 'Elonzo', 'Eloy',
'Elroy', 'Elsworth', 'Elton', 'Elvin', 'Elvis', 'Elwin', 'Elwood',
'Elwyn', 'Ely', 'Elza', 'Elzie', 'Elzy', 'Emanuel', 'Emerson', 'Emery',
'Emett', 'Emil', 'Emile', 'Emiliano', 'Emilio', 'Emit', 'Emma',
'Emmanuel', 'Emmet', 'Emmett', 'Emmit', 'Emmitt', 'Emmons', 'Emory',
'Emry', 'Encarnacion', 'Ennis', 'Enoch', 'Enos', 'Enrico', 'Enrique',
'Enzo', 'Ephraim', 'Ephram', 'Ephriam', 'Epifanio', 'Erasmo',
'Erasmus', 'Erastus', 'Erby', 'Eric', 'Erich', 'Erick', 'Erie', 'Erik',
'Erin', 'Erland', 'Erle', 'Erling', 'Ernest', 'Ernesto', 'Ernie',
'Ernst', 'Errol', 'Ervin', 'Erving', 'Erwin', 'Esau', 'Esco',
'Esequiel', 'Esker', 'Esley', 'Essex', 'Esteban', 'Estel', 'Estes',
'Estevan', 'Estill', 'Eston', 'Ethan', 'Ethelbert', 'Ethen', 'Eugene',
'Eugenio', 'Eusebio', 'Eustace', 'Evan', 'Evander', 'Evans', 'Evelyn',
'Everet', 'Everett', 'Everette', 'Evert', 'Evertt', 'Ewald', 'Ewart',
'Ewell', 'Ewin', 'Ewing', 'Ezekiel', 'Ezell', 'Ezequiel', 'Ezra',
'Ezzard', 'Fabian', 'Faron', 'Farrell', 'Farris', 'Fate', 'Faustino',
'Fayette', 'Fed', 'Federico', 'Felipe', 'Felix', 'Felton', 'Fenton',
'Ferd', 'Ferdinand', 'Ferman', 'Fernand', 'Fernando', 'Ferrell',
'Ferris', 'Festus', 'Fidel', 'Fidencio', 'Fielding', 'Finis', 'Finley',
'Finn', 'Finnegan', 'Firman', 'Fisher', 'Fitzgerald', 'Fitzhugh',
'Fleet', 'Flem', 'Fleming', 'Fletcher', 'Flint', 'Florencio',
'Florentino', 'Florian', 'Floy', 'Floyd', 'Foch', 'Ford', 'Forest',
'Forrest', 'Foster', 'Fount', 'Foy', 'Frances', 'Francesco', 'Francis',
'Francisco', 'Franco', 'Frank', 'Frankie', 'Franklin', 'Franklyn',
'Franz', 'Frazier', 'Fred', 'Freddie', 'Freddy', 'Frederic',
'Frederick', 'Fredie', 'Fredric', 'Fredrick', 'Fredy', 'Freeman',
'Fremont', 'French', 'Friend', 'Fritz', 'Fuller', 'Fulton', 'Furman',
'Gabe', 'Gabriel', 'Gael', 'Gaetano', 'Gage', 'Gaige', 'Gail',
'Gaines', 'Gaither', 'Gale', 'Galen', 'Gannon', 'Gardner', 'Garett',
'Garey', 'Garfield', 'Garland', 'Garner', 'Garnet', 'Garnett',
'Garold', 'Garret', 'Garrett', 'Garrick', 'Garrison', 'Garry', 'Garth',
'Garvin', 'Gary', 'Gasper', 'Gaston', 'Gauge', 'Gaven', 'Gavin',
'Gavyn', 'Gay', 'Gayle', 'Gaylen', 'Gaylon', 'Gaylord', 'Gearld',
'Geary', 'Gee', 'Genaro', 'Gene', 'General', 'Genie', 'Gennaro',
'Geno', 'Geo', 'Geoff', 'Geoffrey', 'George', 'Georgie', 'Geovanni',
'Gerald', 'Geraldo', 'Gerard', 'Gerardo', 'Gerhard', 'Gerhardt',
'Germaine', 'German', 'Gerold', 'Gerrit', 'Gerry', 'Giancarlo',
'Gianni', 'Gibson', 'Gideon', 'Gifford', 'Gil', 'Gilbert', 'Gilberto',
'Giles', 'Gilford', 'Gilman', 'Gilmer', 'Gilmore', 'Gino', 'Giovani',
'Giovanni', 'Giovanny', 'Giuseppe', 'Gladstone', 'Glen', 'Glendon',
'Glenn', 'Glenwood', 'Glover', 'Glynn', 'Godfrey', 'Goebel', 'Golden',
'Gonzalo', 'Gorden', 'Gordon', 'Gorge', 'Gottlieb', 'Governor',
'Grady', 'Grafton', 'Graham', 'Grant', 'Granville', 'Graves', 'Gray',
'Graydon', 'Grayling', 'Grayson', 'Green', 'Greene', 'Greg', 'Gregg',
'Greggory', 'Gregorio', 'Gregory', 'Greyson', 'Griffin', 'Griffith',
'Grove', 'Grover', 'Guido', 'Guilford', 'Guillermo', 'Gunnar',
'Gunner', 'Gurney', 'Gus', 'Guss', 'Gussie', 'Gust', 'Gustaf',
'Gustav', 'Gustave', 'Gustavo', 'Gustavus', 'Guthrie', 'Guy', 'Haden',
'Hadley', 'Haiden', 'Hakeem', 'Hakim', 'Hal', 'Halbert', 'Hale',
'Hall', 'Halley', 'Hallie', 'Halsey', 'Ham', 'Hamilton', 'Hamp',
'Hampton', 'Hamza', 'Handy', 'Hank', 'Hans', 'Hansel', 'Hansford',
'Hanson', 'Harden', 'Hardie', 'Hardin', 'Harding', 'Hardy', 'Harl',
'Harlan', 'Harland', 'Harlen', 'Harley', 'Harlie', 'Harlon', 'Harlow',
'Harm', 'Harman', 'Harmon', 'Harold', 'Harper', 'Harrell', 'Harrie',
'Harris', 'Harrison', 'Harrold', 'Harry', 'Hart', 'Hartley',
'Hartwell', 'Harve', 'Harvey', 'Harvie', 'Harvy', 'Hasan', 'Haskell',
'Hassan', 'Hattie', 'Haven', 'Hayden', 'Hayes', 'Hays', 'Hayward',
'Haywood', 'Hazen', 'Heath', 'Heber', 'Hebert', 'Hector', 'Helmer',
'Hence', 'Henderson', 'Henery', 'Henri', 'Henry', 'Herb', 'Herbert',
'Heriberto', 'Herman', 'Hermann', 'Hermon', 'Hernan', 'Herschel',
'Hershel', 'Hershell', 'Hervey', 'Heyward', 'Hezekiah', 'Hezzie',
'Hideo', 'Hilario', 'Hilary', 'Hilbert', 'Hill', 'Hillard', 'Hillary',
'Hillery', 'Hilliard', 'Hilmer', 'Hilton', 'Hiram', 'Hiroshi',
'Hjalmar', 'Hjalmer', 'Hobart', 'Hobert', 'Hobson', 'Hoke', 'Holden',
'Holland', 'Hollie', 'Hollis', 'Holmes', 'Homer', 'Hoover', 'Hope',
'Horace', 'Horacio', 'Horatio', 'Horton', 'Hosea', 'Hosie', 'Hosteen',
'Houston', 'Howard', 'Howell', 'Hoy', 'Hoyt', 'Hubbard', 'Hubert',
'Hudson', 'Huey', 'Hugh', 'Hughes', 'Hughey', 'Hughie', 'Hugo',
'Humberto', 'Humphrey', 'Hung', 'Hunt', 'Hunter', 'Hurbert', 'Hurley',
'Huston', 'Huy', 'Hyman', 'Hymen', 'Hyrum', 'Ian', 'Ibrahim', 'Ida',
'Ignacio', 'Ignatius', 'Ignatz', 'Ike', 'Illya', 'Imanol', 'Immanuel',
'Infant', 'Ingram', 'Ira', 'Irl', 'Irven', 'Irvin', 'Irvine', 'Irving',
'Irwin', 'Isaac', 'Isaak', 'Isadore', 'Isai', 'Isaiah', 'Isaias',
'Isam', 'Ishaan', 'Isham', 'Ishmael', 'Isiah', 'Isidor', 'Isidore',
'Isidro', 'Ismael', 'Isom', 'Israel', 'Isreal', 'Issac', 'Iva', 'Ivan',
'Iver', 'Iverson', 'Ivey', 'Ivor', 'Ivory', 'Ivy', 'Izaiah', 'Izayah',
'Jabari', 'Jabbar', 'Jabez', 'Jace', 'Jack', 'Jackson', 'Jacky',
'Jacob', 'Jacoby', 'Jacques', 'Jacquez', 'Jade', 'Jaden', 'Jadiel',
'Jadon', 'Jadyn', 'Jaeden', 'Jagger', 'Jaheem', 'Jaheim', 'Jahiem',
'Jahir', 'Jaiden', 'Jaidyn', 'Jaime', 'Jaimie', 'Jair', 'Jairo',
'Jajuan', 'Jake', 'Jakob', 'Jakobe', 'Jaleel', 'Jalen', 'Jalon',
'Jamaal', 'Jamal', 'Jamar', 'Jamarcus', 'Jamari', 'Jamarion', 'Jame',
'Jameel', 'Jamel', 'James', 'Jameson', 'Jamey', 'Jamie', 'Jamil',
'Jamin', 'Jamir', 'Jamison', 'Jammie', 'Jan', 'Jaquan', 'Jaquez',
'Jarad', 'Jared', 'Jaren', 'Jaret', 'Jarett', 'Jarod', 'Jaron',
'Jarrad', 'Jarred', 'Jarrell', 'Jarret', 'Jarrett', 'Jarrod', 'Jarvis',
'Jase', 'Jasen', 'Jasiah', 'Jason', 'Jasper', 'Javen', 'Javier',
'Javion', 'Javon', 'Javonte', 'Jax', 'Jaxen', 'Jaxon', 'Jaxson',
'Jaxton', 'Jay', 'Jayce', 'Jaycob', 'Jaydan', 'Jayden', 'Jaydin',
'Jaydon', 'Jaylan', 'Jaylen', 'Jaylin', 'Jaylon', 'Jayme', 'Jaymes',
'Jayson', 'Jayvion', 'Jayvon', 'Jean', 'Jeb', 'Jed', 'Jedediah',
'Jedidiah', 'Jeff', 'Jefferey', 'Jefferson', 'Jeffery', 'Jeffie',
'Jeffrey', 'Jeffry', 'Jelani', 'Jemal', 'Jennings', 'Jens', 'Jensen',
'Jep', 'Jeptha', 'Jerad', 'Jerald', 'Jeramiah', 'Jeramie', 'Jeramy',
'Jere', 'Jered', 'Jerel', 'Jereme', 'Jeremey', 'Jeremiah', 'Jeremie',
'Jeremy', 'Jerimiah', 'Jerimy', 'Jermain', 'Jermaine', 'Jermey',
'Jerod', 'Jerold', 'Jerome', 'Jeromy', 'Jerrad', 'Jerrel', 'Jerrell',
'Jerrod', 'Jerrold', 'Jerry', 'Jess', 'Jesse', 'Jessee', 'Jessie',
'Jessy', 'Jesus', 'Jethro', 'Jett', 'Jettie', 'Jevon', 'Jewell',
'Jiles', 'Jim', 'Jimmie', 'Jimmy', 'Joaquin', 'Job', 'Jobe', 'Joe',
'Joel', 'Joeseph', 'Joesph', 'Joey', 'Johan', 'Johathan', 'John',
'Johnathan', 'Johnathon', 'Johney', 'Johnie', 'Johnnie', 'Johnny',
'Johnpaul', 'Johnson', 'Johny', 'Jon', 'Jonah', 'Jonas', 'Jonatan',
'Jonathan', 'Jonathon', 'Jones', 'Jonnie', 'Jordan', 'Jorden', 'Jordi',
'Jordon', 'Jordy', 'Jordyn', 'Jorge', 'Jory', 'Jose', 'Josef',
'Joseluis', 'Joseph', 'Josephus', 'Josh', 'Joshua', 'Joshuah',
'Josiah', 'Josue', 'Jovan', 'Jovani', 'Jovanni', 'Jovanny', 'Jovany',
'Joy', 'Juan', 'Judah', 'Judd', 'Jude', 'Judge', 'Judson', 'Juelz',
'Jule', 'Jules', 'Julian', 'Julien', 'Julio', 'Julious', 'Julius',
'Juluis', 'Junior', 'Junious', 'Junius', 'Justen', 'Justice', 'Justin',
'Juston', 'Justus', 'Justyn', 'Juwan', 'Kade', 'Kadeem', 'Kaden',
'Kadin', 'Kadyn', 'Kaeden', 'Kael', 'Kahlil', 'Kai', 'Kaiden', 'Kale',
'Kaleb', 'Kalel', 'Kalen', 'Kalvin', 'Kamari', 'Kamden', 'Kameron',
'Kamren', 'Kamron', 'Kamryn', 'Kane', 'Kanye', 'Kareem', 'Kareen',
'Karim', 'Karl', 'Karson', 'Karter', 'Kasen', 'Kasey', 'Kash', 'Kason',
'Kavon', 'Kayden', 'Kaye', 'Kayson', 'Kazuo', 'Keagan', 'Keandre',
'Keanu', 'Keaton', 'Keegan', 'Keenan', 'Keenen', 'Kegan', 'Keifer',
'Keion', 'Keith', 'Kelan', 'Kelby', 'Kellan', 'Kellen', 'Kelley',
'Kelly', 'Kelsey', 'Kelton', 'Kelvin', 'Kem', 'Ken', 'Kenan', 'Kendal',
'Kendall', 'Kendell', 'Kendrick', 'Kenji', 'Kennard', 'Kennedy',
'Kenneth', 'Kenney', 'Kennith', 'Kennth', 'Kenny', 'Kent', 'Kenton',
'Kenya', 'Kenyatta', 'Kenyon', 'Keon', 'Kermit', 'Kerry', 'Kerwin',
'Keshaun', 'Keshawn', 'Kevan', 'Keven', 'Kevin', 'Kevon', 'Keyon',
'Keyshawn', 'Khalid', 'Khalil', 'Khari', 'Khiry', 'Kian', 'Kiara',
'Kiefer', 'Kiel', 'Kieran', 'Kieth', 'Kiley', 'Killian', 'Kim',
'Kimball', 'Kimberly', 'King', 'Kingston', 'Kinte', 'Kip', 'Kipp',
'Kirby', 'Kirk', 'Kirt', 'Kit', 'Kiyoshi', 'Knox', 'Knute', 'Kobe',
'Koby', 'Koda', 'Kody', 'Koen', 'Kolby', 'Kole', 'Kolten', 'Kolton',
'Konner', 'Konnor', 'Korbin', 'Kordell', 'Korey', 'Kory', 'Kraig',
'Kris', 'Krish', 'Kristen', 'Kristian', 'Kristin', 'Kristofer',
'Kristoffer', 'Kristopher', 'Kunta', 'Kurt', 'Kurtis', 'Kwame', 'Kyan',
'Kylan', 'Kyle', 'Kyler', 'Kymani', 'Kyree', 'Kyson', 'Lacey', 'Lacy',
'Ladarius', 'Laddie', 'Lafayette', 'Lafe', 'Lamar', 'Lamarcus',
'Lambert', 'Lamont', 'Lamonte', 'Lance', 'Landan', 'Landen', 'Landin',
'Landon', 'Landyn', 'Lane', 'Lannie', 'Lanny', 'Laquan', 'Lark',
'Larkin', 'Laron', 'Larry', 'Lars', 'Larue', 'Lary', 'Lashawn',
'Latrell', 'Laurance', 'Laurel', 'Laurence', 'Lavar', 'Lavern',
'Laverne', 'Lavon', 'Lawerence', 'Lawrance', 'Lawrence', 'Lawson',
'Lawton', 'Lawyer', 'Layne', 'Layton', 'Lazaro', 'Le', 'Lea', 'Leamon',
'Leander', 'Leandro', 'Lee', 'Leeroy', 'Leif', 'Leigh', 'Leighton',
'Leland', 'Lem', 'Lemmie', 'Lemon', 'Lemuel', 'Len', 'Lena', 'Lenard',
'Lennie', 'Lennon', 'Lenny', 'Lenon', 'Lenord', 'Lenwood', 'Leo',
'Leon', 'Leonard', 'Leonardo', 'Leonce', 'Leonel', 'Leonidas',
'Leopold', 'Leopoldo', 'Leroy', 'Les', 'Lesley', 'Leslie', 'Less',
'Lessie', 'Lester', 'Levar', 'Levern', 'Levi', 'Levie', 'Levin',
'Levon', 'Levy', 'Lew', 'Lewis', 'Lex', 'Lexie', 'Liam', 'Lige',
'Lilburn', 'Lillard', 'Lim', 'Lincoln', 'Lindbergh', 'Lindell',
'Linden', 'Lindsay', 'Lindsey', 'Lindy', 'Link', 'Linn', 'Linnie',
'Linton', 'Linus', 'Linwood', 'Linzy', 'Lionel', 'Lisandro', 'Lish',
'Lisle', 'Liston', 'Little', 'Littleton', 'Llewellyn', 'Lloyd',
'Logan', 'Lon', 'London', 'Lone', 'Loney', 'Long', 'Lonie', 'Lonnie',
'Lonny', 'Lonzo', 'Lora', 'Loran', 'Loren', 'Lorenz', 'Lorenza',
'Lorenzo', 'Lorin', 'Loring', 'Lorne', 'Lott', 'Lou', 'Louie', 'Louis',
'Love', 'Lovell', 'Lovett', 'Lovie', 'Lowell', 'Loy', 'Loyal', 'Loyd',
'Luc', 'Luca', 'Lucas', 'Lucian', 'Luciano', 'Lucien', 'Lucio',
'Lucious', 'Lucius', 'Lucky', 'Ludwig', 'Lue', 'Luigi', 'Luis', 'Luka',
'Lukas', 'Luke', 'Lula', 'Lum', 'Lupe', 'Luster', 'Lute', 'Luther',
'Luverne', 'Lydell', 'Lyle', 'Lyman', 'Lyn', 'Lyndon', 'Lynn',
'Lynwood', 'Lyric', 'Mac', 'Macarthur', 'Mace', 'Maceo', 'Mack',
'Mackenzie', 'Madden', 'Maddox', 'Maddux', 'Madison', 'Mae', 'Mahlon',
'Major', 'Makai', 'Makhi', 'Mal', 'Malachi', 'Malakai', 'Malaki',
'Malcolm', 'Malcom', 'Male', 'Malik', 'Malvin', 'Mamie', 'Manford',
'Manley', 'Manly', 'Mannie', 'Manning', 'Mansfield', 'Manson',
'Manuel', 'Marc', 'Marcel', 'Marcelino', 'Marcell', 'Marcello',
'Marcellus', 'Marcelo', 'Marchello', 'Marco', 'Marcos', 'Marcus',
'Margarito', 'Mariano', 'Mario', 'Marion', 'Marius', 'Mark', 'Markel',
'Markell', 'Markus', 'Marland', 'Marley', 'Marlin', 'Marlo', 'Marlon',
'Marlyn', 'Marques', 'Marquez', 'Marquis', 'Marquise', 'Marrion',
'Marsh', 'Marshal', 'Marshall', 'Mart', 'Martell', 'Martez', 'Martin',
'Marty', 'Marvin', 'Masao', 'Mason', 'Mat', 'Mateo', 'Math', 'Mathew',
'Mathews', 'Mathias', 'Matias', 'Matt', 'Matteo', 'Matthew',
'Matthias', 'Maurice', 'Mauricio', 'Mauro', 'Maury', 'Maverick', 'Max',
'Maxie', 'Maxim', 'Maximilian', 'Maximiliano', 'Maximillian', 'Maximo',
'Maximus', 'Maxwell', 'Maxx', 'May', 'Maynard', 'Mayo', 'Mcarthur',
'Mckinley', 'Mearl', 'Mekhi', 'Mel', 'Melbourne', 'Mell', 'Melton',
'Melville', 'Melvin', 'Melvyn', 'Memphis', 'Menachem', 'Mercer',
'Merl', 'Merle', 'Merlin', 'Merlyn', 'Merrill', 'Merritt', 'Merton',
'Mervin', 'Mervyn', 'Merwin', 'Messiah', 'Metro', 'Meyer', 'Micah',
'Michael', 'Michal', 'Michale', 'Micheal', 'Michel', 'Michial',
'Mickey', 'Micky', 'Miguel', 'Miguelangel', 'Mikal', 'Mike', 'Mikeal',
'Mikel', 'Mikhail', 'Milan', 'Milas', 'Milburn', 'Miles', 'Milford',
'Millard', 'Miller', 'Mills', 'Milo', 'Milton', 'Miner', 'Minor',
'Minoru', 'Misael', 'Mitch', 'Mitchel', 'Mitchell', 'Moe', 'Mohamed',
'Mohammad', 'Mohammed', 'Moises', 'Monroe', 'Mont', 'Montana', 'Monte',
'Montel', 'Montgomery', 'Montie', 'Montrell', 'Monty', 'Moody',
'Mordechai', 'Morgan', 'Morris', 'Mortimer', 'Morton', 'Mose', 'Moses',
'Moshe', 'Muhammad', 'Murdock', 'Murl', 'Murphy', 'Murray', 'Murry',
'Mustafa', 'Mychal', 'Myer', 'Mykel', 'Myles', 'Myrl', 'Myron',
'Myrtle', 'Najee', 'Nakia', 'Namon', 'Napoleon', 'Nash', 'Nasir',
'Nat', 'Nathan', 'Nathanael', 'Nathanial', 'Nathaniel', 'Nathen',
'Neal', 'Ned', 'Needham', 'Neely', 'Nehemiah', 'Neil', 'Nello', 'Nels',
'Nelson', 'Nery', 'Nestor', 'Nevin', 'Newell', 'Newman', 'Newt',
'Newton', 'Nicholas', 'Nicholaus', 'Nick', 'Nicklaus', 'Nickolas',
'Nicky', 'Nico', 'Nicolas', 'Nigel', 'Nikhil', 'Nikko', 'Niko',
'Nikolai', 'Nikolas', 'Nile', 'Niles', 'Nils', 'Nim', 'Noah', 'Noble',
'Noe', 'Noel', 'Nolan', 'Nolen', 'Norbert', 'Norberto', 'Norman',
'Normand', 'Norris', 'North', 'Norton', 'Norval', 'Norwood', 'Nunzio',
'Oakley', 'Obe', 'Obed', 'Obie', 'Ocie', 'Octave', 'Octavio',
'Octavius', 'Oda', 'Oddie', 'Odell', 'Odie', 'Odin', 'Odis', 'Odus',
'Offie', 'Ogden', 'Okey', 'Ola', 'Olaf', 'Olan', 'Oland', 'Ole',
'Olen', 'Oley', 'Olie', 'Olin', 'Oliver', 'Ollie', 'Olof', 'Omar',
'Omari', 'Omarion', 'Omer', 'Oneal', 'Ora', 'Oral', 'Oran', 'Orange',
'Oren', 'Orie', 'Orin', 'Orion', 'Oris', 'Orla', 'Orland', 'Orlando',
'Orley', 'Orlin', 'Orlo', 'Orren', 'Orrie', 'Orrin', 'Orris', 'Orson',
'Orval', 'Orvel', 'Orvil', 'Orville', 'Orvin', 'Orvis', 'Osbaldo',
'Osborn', 'Osborne', 'Oscar', 'Osie', 'Ossie', 'Osvaldo', 'Oswald',
'Oswaldo', 'Otha', 'Othel', 'Otho', 'Otis', 'Ott', 'Ottie', 'Ottis',
'Otto', 'Ova', 'Ovid', 'Ovila', 'Owen', 'Owens', 'Ozell', 'Ozie',
'Ozzie', 'Pablo', 'Page', 'Palmer', 'Paris', 'Park', 'Parker',
'Parley', 'Parrish', 'Pascal', 'Pasquale', 'Pat', 'Pate', 'Patric',
'Patrick', 'Paul', 'Paulo', 'Paxton', 'Payton', 'Pearley', 'Pedro',
'Percival', 'Percy', 'Perley', 'Pernell', 'Perry', 'Pershing', 'Pete',
'Peter', 'Peyton', 'Phil', 'Philip', 'Phillip', 'Philo', 'Phoenix',
'Pierce', 'Pierre', 'Pinkney', 'Pleas', 'Pleasant', 'Ples', 'Plummer',
'Polk', 'Porfirio', 'Porter', 'Posey', 'Powell', 'Pranav', 'Pratt',
'Prentice', 'Prentiss', 'Presley', 'Press', 'Preston', 'Price',
'Primus', 'Prince', 'Prosper', 'Pryor', 'Purl', 'Quentin', 'Quincy',
'Quinn', 'Quint', 'Quinten', 'Quintin', 'Quinton', 'Rae', 'Raekwon',
'Rafael', 'Rafe', 'Raheem', 'Rahn', 'Rahsaan', 'Rahul', 'Raiden',
'Rakeem', 'Raleigh', 'Ralph', 'Ramiro', 'Ramon', 'Ramsey', 'Rance',
'Rand', 'Randal', 'Randall', 'Randel', 'Randell', 'Randle', 'Randolf',
'Randolph', 'Randy', 'Ransom', 'Raoul', 'Raphael', 'Raquan', 'Ras',
'Rashaad', 'Rashaan', 'Rashad', 'Rashawn', 'Rasheed', 'Raul', 'Raven',
'Ray', 'Rayan', 'Rayburn', 'Rayfield', 'Rayford', 'Raymon', 'Raymond',
'Raymundo', 'Raynard', 'Rayshawn', 'Reagan', 'Reason', 'Red', 'Redden',
'Redmond', 'Reece', 'Reed', 'Reese', 'Refugio', 'Regan', 'Reggie',
'Reginal', 'Reginald', 'Regis', 'Reid', 'Reilly', 'Reinaldo',
'Reinhold', 'Reino', 'Remington', 'Remy', 'Renaldo', 'Renard', 'Rene',
'Reno', 'Reuben', 'Reubin', 'Rex', 'Rexford', 'Rey', 'Reyes',
'Reynaldo', 'Reynold', 'Reynolds', 'Rhett', 'Rhoda', 'Rhys', 'Rian',
'Ricardo', 'Ricci', 'Rice', 'Rich', 'Richard', 'Richie', 'Richmond',
'Rick', 'Rickey', 'Ricki', 'Rickie', 'Ricky', 'Rico', 'Ridge',
'Rigoberto', 'Riley', 'Rishi', 'Ritchie', 'River', 'Rob', 'Robb',
'Robbie', 'Robbin', 'Robby', 'Robert', 'Roberto', 'Robin', 'Robley',
'Robt', 'Roby', 'Rocco', 'Rock', 'Rocky', 'Rod', 'Roddy', 'Roderic',
'Roderick', 'Rodger', 'Rodney', 'Rodolfo', 'Rodrick', 'Rodrigo', 'Roe',
'Roel', 'Rogelio', 'Roger', 'Rogers', 'Rohan', 'Roland', 'Rolando',
'Rolf', 'Roll', 'Rolla', 'Rolland', 'Rollie', 'Rollin', 'Rollo',
'Roma', 'Roman', 'Rome', 'Romello', 'Romeo', 'Romie', 'Ron', 'Ronal',
'Ronald', 'Ronaldo', 'Ronan', 'Rondal', 'Ronin', 'Ronnie', 'Ronny',
'Roosevelt', 'Rory', 'Rosario', 'Rosco', 'Roscoe', 'Rosendo',
'Rosevelt', 'Ross', 'Rossie', 'Roswell', 'Rowan', 'Rowland', 'Roy',
'Royal', 'Royce', 'Rube', 'Ruben', 'Rubin', 'Ruby', 'Rudolf',
'Rudolfo', 'Rudolph', 'Rudy', 'Rueben', 'Ruel', 'Ruffin', 'Ruffus',
'Rufus', 'Rupert', 'Rush', 'Russ', 'Russel', 'Russell', 'Rustin',
'Rusty', 'Rutherford', 'Ryan', 'Ryder', 'Ryker', 'Rylan', 'Ryland',
'Rylee', 'Ryley', 'Ryne', 'Sabastian', 'Sage', 'Saint', 'Sal',
'Salomon', 'Salvador', 'Salvatore', 'Sam', 'Samie', 'Samir', 'Sammie',
'Sammy', 'Sampson', 'Samson', 'Samual', 'Samuel', 'Sanders', 'Sandy',
'Sanford', 'Santana', 'Santiago', 'Santino', 'Santo', 'Santos', 'Saul',
'Saverio', 'Savion', 'Savon', 'Sawyer', 'Schley', 'Schuyler', 'Scot',
'Scott', 'Scottie', 'Scotty', 'Seaborn', 'Seamus', 'Sean', 'Sebastian',
'Sedrick', 'Seldon', 'Selmer', 'Semaj', 'Seneca', 'Sergio', 'Seth',
'Severo', 'Severt', 'Seward', 'Seymour', 'Shad', 'Shade', 'Shafter',
'Shamar', 'Shan', 'Shane', 'Shannon', 'Shanon', 'Shaquan', 'Shaquille',
'Sharif', 'Sharon', 'Shaun', 'Shawn', 'Shay', 'Shayne', 'Shea',
'Shedrick', 'Shelby', 'Sheldon', 'Shelley', 'Shellie', 'Shelly',
'Shelton', 'Shemar', 'Shep', 'Shepherd', 'Sheridan', 'Sherman',
'Sherrill', 'Sherwin', 'Sherwood', 'Shirley', 'Shoji', 'Shon',
'Shyheim', 'Sid', 'Sidney', 'Sie', 'Sigmund', 'Sigurd', 'Silas',
'Silver', 'Silvester', 'Silvio', 'Sim', 'Simeon', 'Simmie', 'Simon',
'Simpson', 'Sincere', 'Sing', 'Skip', 'Skylar', 'Skyler', 'Slade',
'Smith', 'Sol', 'Soloman', 'Solomon', 'Solon', 'Son', 'Sonny', 'Soren',
'Spencer', 'Spenser', 'Spurgeon', 'Squire', 'Stacey', 'Stacy',
'Stafford', 'Stan', 'Stanford', 'Stanislaus', 'Stanley', 'Stanton',
'Starling', 'Stefan', 'Stephan', 'Stephanie', 'Stephen', 'Stephon',
'Sterling', 'Stetson', 'Stevan', 'Steve', 'Steven', 'Stevie',
'Steward', 'Stewart', 'Stone', 'Stonewall', 'Stoney', 'Storm',
'Stuart', 'Sullivan', 'Sumner', 'Susie', 'Sydney', 'Syed', 'Sylas',
'Sylvan', 'Sylvanus', 'Sylvester', 'Tab', 'Tad', 'Taft', 'Tahj', 'Taj',
'Tal', 'Talan', 'Talen', 'Tallie', 'Talmadge', 'Talmage', 'Talon',
'Tandy', 'Tanner', 'Tarik', 'Tariq', 'Tate', 'Tatsuo', 'Taurean',
'Taurus', 'Tavares', 'Tavaris', 'Tavian', 'Tavion', 'Tavon', 'Tayler',
'Taylor', 'Tayshaun', 'Teagan', 'Ted', 'Teddie', 'Teddy', 'Tegan',
'Telly', 'Terance', 'Terell', 'Terence', 'Terrance', 'Terrell',
'Terrence', 'Terrill', 'Terry', 'Tevin', 'Tex', 'Thad', 'Thaddeus',
'Theadore', 'Thedore', 'Theo', 'Theodis', 'Theodore', 'Theophile',
'Therman', 'Theron', 'Thomas', 'Thompson', 'Thor', 'Thornton',
'Thorwald', 'Thos', 'Thurlow', 'Thurman', 'Thurston', 'Tilden',
'Tillman', 'Tilman', 'Tim', 'Timmie', 'Timmothy', 'Timmy', 'Timothy',
'Tito', 'Titus', 'Tobe', 'Tobias', 'Tobie', 'Tobin', 'Toby', 'Tod',
'Todd', 'Toivo', 'Tolbert', 'Tollie', 'Tom', 'Toma', 'Tomas', 'Tomie',
'Tommie', 'Tommy', 'Toney', 'Tony', 'Torey', 'Toriano', 'Torrance',
'Torrence', 'Torrey', 'Torry', 'Tory', 'Toshio', 'Toy', 'Trace',
'Tracey', 'Tracy', 'Trae', 'Travis', 'Travon', 'Trayvon', 'Tre',
'Tremaine', 'Tremayne', 'Trent', 'Trenten', 'Trenton', 'Trever',
'Trevin', 'Trevion', 'Trevon', 'Trevor', 'Trey', 'Treyton', 'Treyvon',
'Trinidad', 'Trinity', 'Tripp', 'Tristan', 'Tristen', 'Tristian',
'Tristin', 'Triston', 'Troy', 'True', 'Trumaine', 'Truman', 'Trystan',
'Tuan', 'Tucker', 'Turner', 'Ty', 'Tye', 'Tyler', 'Tylor', 'Tyquan',
'Tyree', 'Tyreek', 'Tyreese', 'Tyrek', 'Tyreke', 'Tyrel', 'Tyrell',
'Tyrese', 'Tyrik', 'Tyrin', 'Tyriq', 'Tyrique', 'Tyron', 'Tyrone',
'Tyrus', 'Tyshawn', 'Tyson', 'Ulises', 'Ulysses', 'Unknown', 'Unnamed',
'Urban', 'Uriah', 'Uriel', 'Urijah', 'Val', 'Valentin', 'Valentine',
'Valentino', 'Van', 'Vance', 'Vander', 'Vashon', 'Vaughn', 'Vera',
'Vere', 'Vergil', 'Verl', 'Verle', 'Verlin', 'Verlon', 'Verlyn',
'Vern', 'Verna', 'Vernal', 'Verne', 'Vernell', 'Verner', 'Vernie',
'Vernon', 'Vester', 'Vic', 'Vicente', 'Vick', 'Victor', 'Victoriano',
'Vidal', 'Vince', 'Vincent', 'Vincenzo', 'Vinson', 'Vinton', 'Virge',
'Virgel', 'Virgie', 'Virgil', 'Virgle', 'Vito', 'Vollie', 'Volney',
'Von', 'Wade', 'Waino', 'Waldemar', 'Waldo', 'Walker', 'Wallace',
'Wally', 'Walt', 'Walter', 'Walton', 'Ward', 'Wardell', 'Warner',
'Warren', 'Wash', 'Washington', 'Watson', 'Watt', 'Waverly', 'Wayde',
'Wayland', 'Waylon', 'Wayman', 'Waymon', 'Wayne', 'Weaver', 'Webb',
'Webster', 'Weldon', 'Wellington', 'Wells', 'Welton', 'Wendel',
'Wendell', 'Wenzel', 'Werner', 'Wes', 'Wesley', 'Wess', 'West',
'Westin', 'Westley', 'Weston', 'Wheeler', 'Whit', 'Whitney', 'Wilber',
'Wilbert', 'Wilbur', 'Wilburn', 'Wiley', 'Wilford', 'Wilfred',
'Wilfredo', 'Wilfrid', 'Wilhelm', 'Wiliam', 'Wilkie', 'Will',
'Willaim', 'Willam', 'Willard', 'William', 'Williams', 'Willian',
'Williard', 'Willie', 'Willis', 'Willy', 'Wilmer', 'Wilson', 'Wilton',
'Windell', 'Winfield', 'Winford', 'Winfred', 'Wing', 'Winifred',
'Winnie', 'Winston', 'Winthrop', 'Winton', 'Wirt', 'Wm', 'Wong',
'Wood', 'Woodie', 'Woodroe', 'Woodrow', 'Woodson', 'Woody', 'Worley',
'Worth', 'Wright', 'Wyatt', 'Wylie', 'Wyman', 'Xander', 'Xavier',
'Xzavier', 'Yaakov', 'Yadiel', 'Yael', 'Yahir', 'Yair', 'Yancy',
'Yandel', 'Yee', 'Yehuda', 'Yoel', 'York', 'Yosef', 'Yoshio', 'Young',
'Yurem', 'Yusuf', 'Zachariah', 'Zachary', 'Zachery', 'Zack', 'Zackary',
'Zackery', 'Zaid', 'Zaiden', 'Zain', 'Zaire', 'Zakary', 'Zander',
'Zane', 'Zavier', 'Zavion', 'Zayden', 'Zayne', 'Zeb', 'Zebulon',
'Zechariah', 'Zed', 'Zeke', 'Zenas', 'Zeno', 'Zigmund', 'Zion',
'Zollie'
)
# Combined pool of given names, drawn from when no gender is specified.
first_names = first_names_male + first_names_female
last_names = (
'Abbott', 'Abernathy', 'Abshire', 'Adams', 'Altenwerth', 'Anderson', 'Ankunding', 'Armstrong', 'Auer',
'Aufderhar',
'Bahringer', 'Bailey', 'Balistreri', 'Barrows', 'Bartell', 'Bartoletti', 'Barton', 'Bashirian', 'Batz', 'Bauch',
'Baumbach', 'Bayer', 'Beahan', 'Beatty', 'Bechtelar', 'Becker', 'Bednar', 'Beer', 'Beier', 'Berge', 'Bergnaum',
'Bergstrom', 'Bernhard', 'Bernier', 'Bins', 'Blanda', 'Blick', 'Block', 'Bode', 'Boehm', 'Bogan', 'Bogisich',
'Borer', 'Bosco', 'Botsford', 'Boyer', 'Boyle', 'Bradtke', 'Brakus', 'Braun', 'Breitenberg', 'Brekke', 'Brown',
'Bruen', 'Buckridge',
'Carroll', 'Carter', 'Cartwright', 'Casper', 'Cassin', 'Champlin', 'Christiansen', 'Cole', 'Collier', 'Collins',
'Conn', 'Connelly', 'Conroy', 'Considine', 'Corkery', 'Cormier', 'Corwin', 'Cremin', 'Crist', 'Crona', 'Cronin',
'Crooks', 'Cruickshank', 'Cummerata', 'Cummings',
'Dach', 'D\'Amore', 'Daniel', 'Dare', 'Daugherty', 'Davis', 'Deckow', 'Denesik', 'Dibbert', 'Dickens', 'Dicki',
'Dickinson', 'Dietrich', 'Donnelly', 'Dooley', 'Douglas', 'Doyle', 'DuBuque', 'Durgan',
'Ebert', 'Effertz', 'Eichmann', 'Emard', 'Emmerich', 'Erdman', 'Ernser', 'Fadel',
'Fahey', 'Farrell', 'Fay', 'Feeney', 'Feest', 'Feil', 'Ferry', 'Fisher', 'Flatley', 'Frami', 'Franecki',
'Friesen', 'Fritsch', 'Funk',
'Gaylord', 'Gerhold', 'Gerlach', 'Gibson', 'Gislason', 'Gleason', 'Gleichner', 'Glover', 'Goldner', 'Goodwin',
'Gorczany', 'Gottlieb', 'Goyette', 'Grady', 'Graham', 'Grant', 'Green', 'Greenfelder', 'Greenholt', 'Grimes',
'Gulgowski', 'Gusikowski', 'Gutkowski', 'Gutmann',
'Haag', 'Hackett', 'Hagenes', 'Hahn', 'Haley', 'Halvorson', 'Hamill', 'Hammes', 'Hand', 'Hane', 'Hansen',
'Harber', 'Harris', 'Hartmann', 'Harvey', 'Hauck', 'Hayes', 'Heaney', 'Heathcote', 'Hegmann', 'Heidenreich',
'Heller', 'Herman', 'Hermann', 'Hermiston', 'Herzog', 'Hessel', 'Hettinger', 'Hickle', 'Hilll', 'Hills',
'Hilpert', 'Hintz', 'Hirthe', 'Hodkiewicz', 'Hoeger', 'Homenick', 'Hoppe', 'Howe', 'Howell', 'Hudson', 'Huel',
'Huels', 'Hyatt',
'Jacobi', 'Jacobs', 'Jacobson', 'Jakubowski', 'Jaskolski', 'Jast', 'Jenkins', 'Jerde', 'Johns',
'Johnson', 'Johnston', 'Jones',
'Kassulke', 'Kautzer', 'Keebler', 'Keeling', 'Kemmer', 'Kerluke', 'Kertzmann', 'Kessler', 'Kiehn', 'Kihn',
'Kilback', 'King', 'Kirlin', 'Klein', 'Kling', 'Klocko', 'Koch', 'Koelpin', 'Koepp', 'Kohler', 'Konopelski',
'Koss', 'Kovacek', 'Kozey', 'Krajcik', 'Kreiger', 'Kris', 'Kshlerin', 'Kub', 'Kuhic', 'Kuhlman', 'Kuhn',
'Kulas', 'Kunde', 'Kunze', 'Kuphal', 'Kutch', 'Kuvalis',
'Labadie', 'Lakin', 'Lang', 'Langosh', 'Langworth', 'Larkin', 'Larson', 'Leannon', 'Lebsack', 'Ledner',
'Leffler', 'Legros', 'Lehner', 'Lemke', 'Lesch', 'Leuschke', 'Lind', 'Lindgren', 'Littel', 'Little', 'Lockman',
'Lowe', 'Lubowitz', 'Lueilwitz', 'Luettgen', 'Lynch',
'Macejkovic', 'Maggio', 'Mann', 'Mante', 'Marks', 'Marquardt', 'Marvin', 'Mayer', 'Mayert', 'McClure',
'McCullough', 'McDermott', 'McGlynn', 'McKenzie', 'McLaughlin', 'Medhurst', 'Mertz', 'Metz', 'Miller', 'Mills',
'Mitchell', 'Moen', 'Mohr', 'Monahan', 'Moore', 'Morar', 'Morissette', 'Mosciski', 'Mraz', 'Mueller', 'Muller',
'Murazik', 'Murphy', 'Murray',
'Nader', 'Nicolas', 'Nienow', 'Nikolaus', 'Nitzsche', 'Nolan',
'Oberbrunner', 'O\'Connell', 'O\'Conner', 'O\'Hara', 'O\'Keefe', 'O\'Kon', 'Okuneva', 'Olson', 'Ondricka',
'O\'Reilly', 'Orn', 'Ortiz', 'Osinski',
'Pacocha', 'Padberg', 'Pagac', 'Parisian', 'Parker', 'Paucek', 'Pfannerstill', 'Pfeffer', 'Pollich', 'Pouros',
'Powlowski', 'Predovic', 'Price', 'Prohaska', 'Prosacco', 'Purdy',
'Quigley', 'Quitzon',
'Rath', 'Ratke', 'Rau', 'Raynor', 'Reichel', 'Reichert', 'Reilly', 'Reinger', 'Rempel', 'Renner', 'Reynolds',
'Rice', 'Rippin', 'Ritchie', 'Robel', 'Roberts', 'Rodriguez', 'Rogahn', 'Rohan', 'Rolfson', 'Romaguera', 'Roob',
'Rosenbaum', 'Rowe', 'Ruecker', 'Runolfsdottir', 'Runolfsson', 'Runte', 'Russel', 'Rutherford', 'Ryan',
'Sanford', 'Satterfield', 'Sauer', 'Sawayn',
'Schaden', 'Schaefer', 'Schamberger', 'Schiller', 'Schimmel', 'Schinner', 'Schmeler', 'Schmidt', 'Schmitt',
'Schneider', 'Schoen', 'Schowalter', 'Schroeder', 'Schulist', 'Schultz', 'Schumm', 'Schuppe', 'Schuster',
'Senger', 'Shanahan', 'Shields', 'Simonis', 'Sipes', 'Skiles', 'Smith', 'Smitham', 'Spencer', 'Spinka',
'Sporer', 'Stamm', 'Stanton', 'Stark', 'Stehr', 'Steuber', 'Stiedemann', 'Stokes', 'Stoltenberg', 'Stracke',
'Streich', 'Stroman', 'Strosin', 'Swaniawski', 'Swift',
'Terry', 'Thiel', 'Thompson', 'Tillman', 'Torp', 'Torphy', 'Towne', 'Toy', 'Trantow', 'Tremblay', 'Treutel',
'Tromp', 'Turcotte', 'Turner',
'Ullrich', 'Upton',
'Vandervort', 'Veum', 'Volkman', 'Von', 'VonRueden',
'Waelchi', 'Walker', 'Walsh', 'Walter', 'Ward', 'Waters', 'Watsica', 'Weber', 'Wehner', 'Weimann', 'Weissnat',
'Welch', 'West', 'White', 'Wiegand', 'Wilderman', 'Wilkinson', 'Will', 'Williamson', 'Willms', 'Windler',
'Wintheiser', 'Wisoky', 'Wisozk', 'Witting', 'Wiza', 'Wolf', 'Wolff', 'Wuckert', 'Wunsch', 'Wyman',
'Yost', 'Yundt',
'Zboncak', 'Zemlak', 'Ziemann', 'Zieme', 'Zulauf'
)
# Honorifics and name suffixes used when composing full names; the doctoral
# title/suffixes ('Dr.', 'MD', 'DDS', 'PhD', 'DVM') appear for both genders.
prefixes_female = ('Mrs.', 'Ms.', 'Miss', 'Dr.')
prefixes_male = ('Mr.', 'Dr.')
suffixes_female = ('MD', 'DDS', 'PhD', 'DVM')
suffixes_male = ('Jr.', 'Sr.', 'I', 'II', 'III', 'IV', 'V', 'MD', 'DDS', 'PhD', 'DVM')
| 74.399657
| 120
| 0.535758
|
acff3f779ac02e7170d48022034319cb621f4f76
| 3,003
|
py
|
Python
|
wpdhack/_nbdev.py
|
AyrtonB/WPD-Hackathon
|
3027570ecd5a7b0866fe3d72e5d74facdf4e8efe
|
[
"MIT"
] | 1
|
2021-12-27T00:46:54.000Z
|
2021-12-27T00:46:54.000Z
|
wpdhack/_nbdev.py
|
AyrtonB/WPD-Hackathon
|
3027570ecd5a7b0866fe3d72e5d74facdf4e8efe
|
[
"MIT"
] | null | null | null |
wpdhack/_nbdev.py
|
AyrtonB/WPD-Hackathon
|
3027570ecd5a7b0866fe3d72e5d74facdf4e8efe
|
[
"MIT"
] | null | null | null |
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"clean_real_power_df": "01-data-processing.ipynb",
"load_real_power_dataset": "01-data-processing.ipynb",
"load_datetime_df": "01-data-processing.ipynb",
"load_weather_df": "01-data-processing.ipynb",
"construct_baseline_features_target_dfs": "01-data-processing.ipynb",
"dt_rng_to_SPs": "03a-feature-generation.ipynb",
"create_temporal_features": "03a-feature-generation.ipynb",
"create_dir_speed_features": "03a-feature-generation.ipynb",
"add_col_suffix": "03a-feature-generation.ipynb",
"isfloat": "03a-feature-generation.ipynb",
"iterate_feature_gen_over_site_gridpoints": "03a-feature-generation.ipynb",
"get_grid_points": "03a-feature-generation.ipynb",
"calc_hcdh_factor": "03a-feature-generation.ipynb",
"create_hcdh_features": "03a-feature-generation.ipynb",
"create_feature_stats": "03a-feature-generation.ipynb",
"create_combined_feature_stats": "03a-feature-generation.ipynb",
"create_demand_stat_features": "03a-feature-generation.ipynb",
"create_solar_features": "03a-feature-generation.ipynb",
"create_prev_month_stats_df": "03a-feature-generation.ipynb",
"create_lagged_df": "03a-feature-generation.ipynb",
"creat_demand_ts_pcs": "03a-feature-generation.ipynb",
"create_rate_of_change_features": "03a-feature-generation.ipynb",
"clean_and_normalise_data": "03a-feature-generation.ipynb",
"process_features": "03a-feature-generation.ipynb",
"create_additional_features": "03a-feature-generation.ipynb",
"ScikitModel": "04-model-suite.ipynb",
"create_train_test_indexes": "04-model-suite.ipynb",
"calculate_error_metrics": "04-model-suite.ipynb",
"calc_month_error_metrics": "04-model-suite.ipynb",
"construct_prediction_df": "04-model-suite.ipynb",
"ModelSuite": "04-model-suite.ipynb",
"load_module_attr": "04-model-suite.ipynb",
"run_parameterised_model": "04-model-suite.ipynb",
"plot_obsv_v_pred": "04-model-suite.ipynb",
"create_residual_bin_avgs_s": "04-model-suite.ipynb",
"plot_residual_bin_avgs": "04-model-suite.ipynb",
"plot_pred_sample": "04-model-suite.ipynb",
"flatten_list": "04-model-suite.ipynb",
"plot_residuals_dist": "04-model-suite.ipynb",
"visualise_errors": "04-model-suite.ipynb",
"save_params": "04-model-suite.ipynb",
"load_params": "04-model-suite.ipynb",
"test_parameterised_model": "08-testing.ipynb"}
modules = ["data.py",
"feature.py",
"suite.py",
"models.py",
"test.py"]
doc_url = "https://AyrtonB.github.io/WPD-Hackathon/"
git_url = "https://github.com/AyrtonB/WPD-Hackathon"
def custom_doc_links(name):
    """nbdev hook for resolving documentation links; this project defines none."""
    return None
| 50.05
| 84
| 0.667999
|
acff40521f09b4784b6f9b1fdf537512f61f8366
| 4,104
|
py
|
Python
|
tests/mock_tests.py
|
pullyl/gspread
|
4d8a1fd2125af33120b4ff1f6d67453348ed4d9e
|
[
"MIT"
] | 3
|
2017-09-21T10:54:58.000Z
|
2019-05-01T00:25:14.000Z
|
tests/mock_tests.py
|
pullyl/gspread
|
4d8a1fd2125af33120b4ff1f6d67453348ed4d9e
|
[
"MIT"
] | null | null | null |
tests/mock_tests.py
|
pullyl/gspread
|
4d8a1fd2125af33120b4ff1f6d67453348ed4d9e
|
[
"MIT"
] | 1
|
2021-01-07T09:25:49.000Z
|
2021-01-07T09:25:49.000Z
|
"""A test suite that doesn't query the Google API.
Avoiding direct network access is beneficial in that it markedly speeds up
testing, avoids error-prone credential setup, and enables validation even if
internet access is unavailable.
"""
from datetime import datetime
import unittest
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
try:
from unittest import mock
except ImportError:
import mock
import gspread
from tests import test
from tests import test_utils
class MockUtilsTest(test.UtilsTest):
    # Re-runs the inherited UtilsTest suite unchanged; no overrides are
    # needed because those tests do not touch the network.
    pass
class MockGspreadTest(unittest.TestCase):
    """This is the base class for all tests not accessing the API.
    IMPORTANT: This class must be inherited _BEFORE_ a test suite inheriting
    from GspreadTest. This allows MockGspreadTest.setUpClass to clobber the
    one inherited from GspreadTest which authorizes with the Google API.
    """
    @classmethod
    def setUpClass(cls):
        # Build an in-memory config and a Client with empty credentials;
        # subclasses populate the config and mock out the network calls.
        try:
            cls.config = ConfigParser.RawConfigParser()
            cls.gc = gspread.client.Client(auth={})
        except IOError as e:
            # NOTE(review): nothing in this try block opens a file, so this
            # IOError handler looks unreachable — presumably a leftover from
            # a version that read the test configuration from disk; confirm.
            msg = "Can't find %s for reading test configuration. "
            raise Exception(msg % e.filename)
class MockClientTest(MockGspreadTest, test.ClientTest):
    """Test for gspread.Client that mocks out the server response.
    The tests themselves are inherited from ClientTest so no redefinition is
    necessary.
    """
    @classmethod
    def setUpClass(cls):
        super(MockClientTest, cls).setUpClass()
        # Canned spreadsheet metadata reused across the inherited tests.
        key = '0123456789ABCDEF'
        title = 'This is a spreadsheet title'
        url = 'https://docs.google.com/spreadsheet/ccc?key=' + key
        updated = datetime.now()
        dev_email = 'foobar@developer.gserviceaccount.com'
        user_name = 'First Last'
        user_email = 'real_email@gmail.com'
        # Initialize mock ConfigParser
        cls.config.add_section('Spreadsheet')
        cls.config.set('Spreadsheet', 'key', key)
        cls.config.set('Spreadsheet', 'title', title)
        cls.config.set('Spreadsheet', 'url', url)
        # Set up spreadsheet mock: the feed the client would normally fetch
        # from the API is generated locally and returned by a Mock.
        feed_obj = test_utils.SpreadsheetFeed(updated, dev_email)
        feed_obj.add_entry(key, title, user_name, user_email, updated)
        feed = feed_obj.to_xml()
        cls.gc.get_spreadsheets_feed = mock.Mock(return_value=feed)
        # POST (spreadsheet creation) answers with a JSON body carrying the key.
        post_mock = mock.MagicMock()
        post_mock.return_value.json.return_value = {'id': key}
        cls.gc.session.post = post_mock
class MockSpreadsheetTest(MockGspreadTest, test.SpreadsheetTest):
    """Test for gspread.Spreadsheet that mocks out the server response.
    The tests themselves are inherited from SpreadsheetTest so no redefinition
    is necessary.
    """
    @classmethod
    def setUpClass(cls):
        super(MockSpreadsheetTest, cls).setUpClass()
        # Canned data for one spreadsheet containing one worksheet.
        updated = datetime.now()
        user_name = 'First Last'
        user_email = 'real_email@gmail.com'
        key = '0123456789ABCDEF'
        title = 'This is a spreadsheet title'
        ws_feed = test_utils.WorksheetFeed(updated, user_name, user_email,
                                           key, title)
        dev_email = 'foobar@developer.gserviceaccount.com'
        ss_feed = test_utils.SpreadsheetFeed(updated, dev_email)
        ss_feed.add_entry(key, title, user_name, user_email, updated)
        # Worksheet metadata added to the locally generated worksheet feed.
        ws_key = 'AB64KEY'
        ws_title = 'WS Title'
        ws_id = 123456789
        ws_version = 'avkey'
        num_cols = 10
        num_rows = 10
        ws_updated = updated
        ws_feed.add_entry(ws_key, ws_title, ws_id, ws_version, num_cols,
                          num_rows, ws_updated)
        # Initialize mock ConfigParser
        cls.config.add_section('Spreadsheet')
        cls.config.set('Spreadsheet', 'id', key)
        cls.config.set('Spreadsheet', 'title', title)
        cls.config.set('Spreadsheet', 'sheet1_title', ws_title)
        # Set up mocks so feed fetches never leave the process.
        cls.gc.get_spreadsheets_feed = mock.Mock(return_value=ss_feed.to_xml())
        cls.gc.get_worksheets_feed = mock.Mock(return_value=ws_feed.to_xml())
| 32.314961
| 79
| 0.669834
|
acff41e54930eaaad1c9ec0054d16bf67a8ec242
| 11,654
|
py
|
Python
|
manim/utils/rate_functions.py
|
PhotonSpheres/manim
|
7399c24b33095e29633fd75460d13eae5703cba9
|
[
"MIT"
] | 9,497
|
2020-05-19T04:05:51.000Z
|
2022-03-31T19:00:02.000Z
|
manim/utils/rate_functions.py
|
PhotonSpheres/manim
|
7399c24b33095e29633fd75460d13eae5703cba9
|
[
"MIT"
] | 2,052
|
2020-05-19T03:35:26.000Z
|
2022-03-31T16:18:06.000Z
|
manim/utils/rate_functions.py
|
PhotonSpheres/manim
|
7399c24b33095e29633fd75460d13eae5703cba9
|
[
"MIT"
] | 1,016
|
2020-05-20T01:16:11.000Z
|
2022-03-30T16:47:14.000Z
|
"""A selection of rate functions, i.e., *speed curves* for animations.
Please find a standard list at https://easings.net/. Here is a picture
for the non-standard ones
.. manim:: RateFuncExample
:save_last_frame:
class RateFuncExample(Scene):
def construct(self):
x = VGroup()
for k, v in rate_functions.__dict__.items():
if "function" in str(v):
if (
not k.startswith("__")
and not k.startswith("sqrt")
and not k.startswith("bezier")
):
try:
rate_func = v
plot = (
ParametricFunction(
lambda x: [x, rate_func(x), 0],
t_range=[0, 1, .01],
use_smoothing=False,
color=YELLOW,
)
.stretch_to_fit_width(1.5)
.stretch_to_fit_height(1)
)
plot_bg = SurroundingRectangle(plot).set_color(WHITE)
plot_title = (
Text(rate_func.__name__, weight=BOLD)
.scale(0.5)
.next_to(plot_bg, UP, buff=0.1)
)
x.add(VGroup(plot_bg, plot, plot_title))
except: # because functions `not_quite_there`, `function squish_rate_func` are not working.
pass
x.arrange_in_grid(cols=8)
x.height = config.frame_height
x.width = config.frame_width
x.move_to(ORIGIN).scale(0.95)
self.add(x)
There are primarily 3 kinds of standard easing functions:
#. Ease In - The animation has a smooth start.
#. Ease Out - The animation has a smooth end.
#. Ease In Out - The animation has a smooth start as well as smooth end.
.. note:: The standard functions are not exported, so to use them you do something like this:
rate_func=rate_functions.ease_in_sine
On the other hand, the non-standard functions, which are used more commonly, are exported and can be used directly.
.. manim:: RateFunctions1Example
class RateFunctions1Example(Scene):
def construct(self):
line1 = Line(3*LEFT, 3*RIGHT).shift(UP).set_color(RED)
line2 = Line(3*LEFT, 3*RIGHT).set_color(GREEN)
line3 = Line(3*LEFT, 3*RIGHT).shift(DOWN).set_color(BLUE)
dot1 = Dot().move_to(line1.get_left())
dot2 = Dot().move_to(line2.get_left())
dot3 = Dot().move_to(line3.get_left())
label1 = Tex("Ease In").next_to(line1, RIGHT)
label2 = Tex("Ease out").next_to(line2, RIGHT)
label3 = Tex("Ease In Out").next_to(line3, RIGHT)
self.play(
FadeIn(VGroup(line1, line2, line3)),
FadeIn(VGroup(dot1, dot2, dot3)),
Write(VGroup(label1, label2, label3)),
)
self.play(
MoveAlongPath(dot1, line1, rate_func=rate_functions.ease_in_sine),
MoveAlongPath(dot2, line2, rate_func=rate_functions.ease_out_sine),
MoveAlongPath(dot3, line3, rate_func=rate_functions.ease_in_out_sine),
run_time=7
)
self.wait()
"""
# Only the non-standard rate functions are exported for star-imports; the
# standard ``ease_*`` easings defined below are intentionally left out (use
# them via ``rate_functions.ease_in_sine`` etc. — see the module docstring).
__all__ = [
    "linear",
    "smooth",
    "rush_into",
    "rush_from",
    "slow_into",
    "double_smooth",
    "there_and_back",
    "there_and_back_with_pause",
    "running_start",
    "not_quite_there",
    "wiggle",
    "squish_rate_func",
    "lingering",
    "exponential_decay",
]
import typing
from functools import wraps
from math import sqrt
import numpy as np
from ..utils.bezier import bezier
from ..utils.simple_functions import sigmoid
# This is a decorator that makes sure any function it's used on will
# return 0 if t<0 and 1 if t>1.
def unit_interval(function):
    """Decorator clamping *function* to the unit interval.

    The wrapped function delegates to *function* for 0 <= t <= 1 and
    returns 0 for t < 0 and 1 for t > 1.
    """

    @wraps(function)
    def clamped(t, *args, **kwargs):
        if 0 <= t <= 1:
            return function(t, *args, **kwargs)
        return 0 if t < 0 else 1

    return clamped
# This is a decorator that makes sure any function it's used on will
# return 0 if t<0 or t>1.
def zero(function):
    """Decorator forcing *function* to return 0 outside the unit interval."""

    @wraps(function)
    def gated(t, *args, **kwargs):
        inside = 0 <= t <= 1
        return function(t, *args, **kwargs) if inside else 0

    return gated
@unit_interval
def linear(t: float) -> float:
    """Identity rate function: progress equals time."""
    return t
@unit_interval
def smooth(t: float, inflection: float = 10.0) -> float:
    """Sigmoid-based ease-in-out; *inflection* controls the steepness.

    The sigmoid is shifted and rescaled (via ``error``) so the output is
    exactly 0 at t=0 and 1 at t=1, then clamped to [0, 1].
    """
    error = sigmoid(-inflection / 2)
    return min(
        max((sigmoid(inflection * (t - 0.5)) - error) / (1 - 2 * error), 0),
        1,
    )
@unit_interval
def rush_into(t: float, inflection: float = 10.0) -> float:
    """First half of :func:`smooth`, rescaled to [0, 1]: fast start, smooth finish."""
    return 2 * smooth(t / 2.0, inflection)
@unit_interval
def rush_from(t: float, inflection: float = 10.0) -> float:
    """Second half of :func:`smooth`, rescaled to [0, 1]: smooth start, fast finish."""
    return 2 * smooth(t / 2.0 + 0.5, inflection) - 1
@unit_interval
def slow_into(t: float) -> float:
    """Quarter-circle arc: starts fast and decelerates into the end."""
    return np.sqrt(1 - (1 - t) * (1 - t))
@unit_interval
def double_smooth(t: float) -> float:
    """Two :func:`smooth` ramps in sequence, each covering half of the time."""
    if t < 0.5:
        return 0.5 * smooth(2 * t)
    else:
        return 0.5 * (1 + smooth(2 * t - 1))
@zero
def there_and_back(t: float, inflection: float = 10.0) -> float:
    """Smooth ramp from 0 up to 1 (at t=0.5) and back down to 0."""
    new_t = 2 * t if t < 0.5 else 2 * (1 - t)
    return smooth(new_t, inflection)
@zero
def there_and_back_with_pause(t: float, pause_ratio: float = 1.0 / 3) -> float:
    """Like :func:`there_and_back`, but holds at 1 for *pause_ratio* of the time."""
    a = 1.0 / pause_ratio
    if t < 0.5 - pause_ratio / 2:
        return smooth(a * t)
    elif t < 0.5 + pause_ratio / 2:
        return 1
    else:
        return smooth(a - a * t)
@unit_interval
def running_start(
    t: float,
    pull_factor: float = -0.5,
) -> float:
    """Ease that pulls backwards before accelerating forwards.

    The curve is a degree-6 Bezier through the control values
    ``[0, 0, pull_factor, pull_factor, 1, 1, 1]``; with a negative
    *pull_factor* the value dips below 0 (the "running start") before
    rising to 1.  For scalar control values ``bezier(...)(t)`` evaluates
    to a scalar, so the return annotation is ``float`` (the previous
    ``typing.Iterable`` annotation was a placeholder).
    """
    return bezier([0, 0, pull_factor, pull_factor, 1, 1, 1])(t)
def not_quite_there(
    func: typing.Callable[[float], float] = smooth,
    proportion: float = 0.7,
) -> typing.Callable[[float], float]:
    """Scale *func* so it tops out at *proportion* instead of reaching 1."""
    def result(t):
        return proportion * func(t)
    return result
@zero
def wiggle(t: float, wiggles: float = 2) -> float:
    """Oscillate around 0, with amplitude enveloped by :func:`there_and_back`."""
    return there_and_back(t) * np.sin(wiggles * np.pi * t)
def squish_rate_func(
    func: typing.Callable[[float], float],
    a: float = 0.4,
    b: float = 0.6,
) -> typing.Callable[[float], float]:
    """Remap *func* so its whole sweep happens between t = a and t = b.

    Before *a* the returned function is pinned at ``func(0)``, after *b*
    at ``func(1)``; in between, [a, b] is rescaled onto [0, 1].  When
    ``a == b`` the time value *a* itself is returned (historical quirk,
    preserved).
    """

    def squished(t):
        if a == b:
            return a
        if t < a:
            return func(0)
        if t > b:
            return func(1)
        return func((t - a) / (b - a))

    return squished
# Stylistically, should this take parameters (with default values)?
# Ultimately, the functionality is entirely subsumed by squish_rate_func,
# but it may be useful to have a nice name for with nice default params for
# "lingering", different from squish_rate_func's default params
@unit_interval
def lingering(t: float) -> float:
    """Reach 1 linearly over the first 80% of the time, then linger at 1."""
    return squish_rate_func(lambda t: t, 0, 0.8)(t)
@unit_interval
def exponential_decay(t: float, half_life: float = 0.1) -> float:
    """Approach 1 exponentially with decay constant *half_life*.

    NOTE(review): despite the name, ``half_life`` is the 1/e time constant
    of ``exp(-t / half_life)`` — the remaining gap halves every
    ``half_life * ln(2)``, not every ``half_life``.
    """
    # The half-life should be rather small to minimize
    # the cut-off error at the end
    return 1 - np.exp(-t / half_life)
@unit_interval
def ease_in_sine(t: float) -> float:
    """Sinusoidal ease-in (https://easings.net/#easeInSine)."""
    return 1 - np.cos((t * np.pi) / 2)
@unit_interval
def ease_out_sine(t: float) -> float:
    """Sinusoidal ease-out (https://easings.net/#easeOutSine)."""
    return np.sin((t * np.pi) / 2)
@unit_interval
def ease_in_out_sine(t: float) -> float:
    """Sinusoidal ease-in-out (https://easings.net/#easeInOutSine)."""
    return -(np.cos(np.pi * t) - 1) / 2
@unit_interval
def ease_in_quad(t: float) -> float:
    """Quadratic ease-in (https://easings.net/#easeInQuad)."""
    return t * t
@unit_interval
def ease_out_quad(t: float) -> float:
    """Quadratic ease-out (https://easings.net/#easeOutQuad)."""
    return 1 - (1 - t) * (1 - t)
@unit_interval
def ease_in_out_quad(t: float) -> float:
    """Quadratic ease-in-out (https://easings.net/#easeInOutQuad)."""
    return 2 * t * t if t < 0.5 else 1 - pow(-2 * t + 2, 2) / 2
@unit_interval
def ease_in_cubic(t: float) -> float:
    """Cubic ease-in (https://easings.net/#easeInCubic)."""
    return t * t * t
@unit_interval
def ease_out_cubic(t: float) -> float:
    """Cubic ease-out (https://easings.net/#easeOutCubic)."""
    return 1 - pow(1 - t, 3)
@unit_interval
def ease_in_out_cubic(t: float) -> float:
    """Cubic ease-in-out (https://easings.net/#easeInOutCubic)."""
    return 4 * t * t * t if t < 0.5 else 1 - pow(-2 * t + 2, 3) / 2
@unit_interval
def ease_in_quart(t: float) -> float:
    """Quartic ease-in (https://easings.net/#easeInQuart)."""
    return t * t * t * t
@unit_interval
def ease_out_quart(t: float) -> float:
    """Quartic ease-out (https://easings.net/#easeOutQuart)."""
    return 1 - pow(1 - t, 4)
@unit_interval
def ease_in_out_quart(t: float) -> float:
    """Quartic ease-in-out (https://easings.net/#easeInOutQuart)."""
    return 8 * t * t * t * t if t < 0.5 else 1 - pow(-2 * t + 2, 4) / 2
@unit_interval
def ease_in_quint(t: float) -> float:
    """Quintic ease-in (https://easings.net/#easeInQuint)."""
    return t * t * t * t * t
@unit_interval
def ease_out_quint(t: float) -> float:
    """Quintic ease-out (https://easings.net/#easeOutQuint)."""
    return 1 - pow(1 - t, 5)
@unit_interval
def ease_in_out_quint(t: float) -> float:
    """Quintic ease-in-out (https://easings.net/#easeInOutQuint)."""
    return 16 * t * t * t * t * t if t < 0.5 else 1 - pow(-2 * t + 2, 5) / 2
@unit_interval
def ease_in_expo(t: float) -> float:
    """Exponential ease-in; the t == 0 guard pins the curve exactly to 0."""
    return 0 if t == 0 else pow(2, 10 * t - 10)
@unit_interval
def ease_out_expo(t: float) -> float:
    """Exponential ease-out; the t == 1 guard pins the curve exactly to 1."""
    return 1 if t == 1 else 1 - pow(2, -10 * t)
@unit_interval
def ease_in_out_expo(t: float) -> float:
    """Exponential ease-in-out (https://easings.net/#easeInOutExpo)."""
    if t == 0:
        return 0
    elif t == 1:
        return 1
    elif t < 0.5:
        return pow(2, 20 * t - 10) / 2
    else:
        return (2 - pow(2, -20 * t + 10)) / 2
@unit_interval
def ease_in_circ(t: float) -> float:
    """Circular ease-in (https://easings.net/#easeInCirc)."""
    return 1 - sqrt(1 - pow(t, 2))
@unit_interval
def ease_out_circ(t: float) -> float:
    """Circular ease-out (https://easings.net/#easeOutCirc)."""
    return sqrt(1 - pow(t - 1, 2))
@unit_interval
def ease_in_out_circ(t: float) -> float:
    """Circular ease-in-out (https://easings.net/#easeInOutCirc)."""
    return (
        (1 - sqrt(1 - pow(2 * t, 2))) / 2
        if t < 0.5
        else (sqrt(1 - pow(-2 * t + 2, 2)) + 1) / 2
    )
@unit_interval
def ease_in_back(t: float) -> float:
    """Back ease-in: dips below 0 before accelerating (easings.net/#easeInBack)."""
    c1 = 1.70158  # canonical overshoot constant
    c3 = c1 + 1
    return c3 * t * t * t - c1 * t * t
@unit_interval
def ease_out_back(t: float) -> float:
    """Back ease-out: overshoots above 1 before settling (easings.net/#easeOutBack)."""
    c1 = 1.70158  # canonical overshoot constant
    c3 = c1 + 1
    return 1 + c3 * pow(t - 1, 3) + c1 * pow(t - 1, 2)
@unit_interval
def ease_in_out_back(t: float) -> float:
    """Back ease-in-out: overshoots at both ends (easings.net/#easeInOutBack)."""
    c1 = 1.70158
    c2 = c1 * 1.525  # amplified overshoot for the combined curve
    return (
        (pow(2 * t, 2) * ((c2 + 1) * 2 * t - c2)) / 2
        if t < 0.5
        else (pow(2 * t - 2, 2) * ((c2 + 1) * (t * 2 - 2) + c2) + 2) / 2
    )
@unit_interval
def ease_in_elastic(t: float) -> float:
    """Elastic ease-in: spring-like oscillation growing into the end."""
    c4 = (2 * np.pi) / 3  # oscillation period
    if t == 0:
        return 0
    elif t == 1:
        return 1
    else:
        return -pow(2, 10 * t - 10) * np.sin((t * 10 - 10.75) * c4)
@unit_interval
def ease_out_elastic(t: float) -> float:
    """Elastic ease-out: spring-like oscillation decaying from the start."""
    c4 = (2 * np.pi) / 3  # oscillation period
    if t == 0:
        return 0
    elif t == 1:
        return 1
    else:
        return pow(2, -10 * t) * np.sin((t * 10 - 0.75) * c4) + 1
@unit_interval
def ease_in_out_elastic(t: float) -> float:
    """Elastic ease-in-out (https://easings.net/#easeInOutElastic)."""
    c5 = (2 * np.pi) / 4.5  # oscillation period for the combined curve
    if t == 0:
        return 0
    elif t == 1:
        return 1
    elif t < 0.5:
        return -(pow(2, 20 * t - 10) * np.sin((20 * t - 11.125) * c5)) / 2
    else:
        return (pow(2, -20 * t + 10) * np.sin((20 * t - 11.125) * c5)) / 2 + 1
@unit_interval
def ease_in_bounce(t: float) -> float:
    """Bounce ease-in: time-reversed mirror of :func:`ease_out_bounce`."""
    return 1 - ease_out_bounce(1 - t)
@unit_interval
def ease_out_bounce(t: float) -> float:
    """Bounce ease-out: four successively smaller parabolic bounces."""
    n1 = 7.5625  # parabola steepness
    d1 = 2.75  # divides [0, 1] into the four bounce segments
    if t < 1 / d1:
        return n1 * t * t
    elif t < 2 / d1:
        return n1 * (t - 1.5 / d1) * (t - 1.5 / d1) + 0.75
    elif t < 2.5 / d1:
        return n1 * (t - 2.25 / d1) * (t - 2.25 / d1) + 0.9375
    else:
        return n1 * (t - 2.625 / d1) * (t - 2.625 / d1) + 0.984375
@unit_interval
def ease_in_out_bounce(t: float) -> float:
    """Bounce ease-in-out: bounce in to the midpoint, bounce out from it."""
    if t < 0.5:
        return (1 - ease_out_bounce(1 - 2 * t)) / 2
    else:
        return (1 + ease_out_bounce(2 * t - 1)) / 2
| 25.445415
| 119
| 0.548824
|
acff41f707d81dad5455402dea3d3e1742dde386
| 2,699
|
py
|
Python
|
data_anonym_methods/k_anonymity.py
|
GeorgeManakanatas/PPDM
|
9e6af80681db497447197cac14b26b99e588f231
|
[
"MIT",
"Unlicense"
] | 3
|
2016-11-18T07:24:39.000Z
|
2019-07-06T07:45:15.000Z
|
data_anonym_methods/k_anonymity.py
|
GeorgeManakanatas/PPDM
|
9e6af80681db497447197cac14b26b99e588f231
|
[
"MIT",
"Unlicense"
] | 2
|
2017-02-14T15:24:34.000Z
|
2019-11-25T19:18:05.000Z
|
data_anonym_methods/k_anonymity.py
|
GeorgeManakanatas/PPDM
|
9e6af80681db497447197cac14b26b99e588f231
|
[
"MIT",
"Unlicense"
] | 3
|
2017-12-19T07:04:24.000Z
|
2021-08-20T15:42:13.000Z
|
''' k anonymity module
functions:
master: performs selection of the anonymisation methods
simple_kanonymity: performs simplistic anonymisation
'''
def master(start_dataframe, nums, kmin, logger):
    '''
    Entry point for k-anonymity: dispatches to an anonymisation strategy.

    Parameters:
        start_dataframe (dataframe): the data that we want to anonymise
        nums (array): the columns of the dataframe that when combined become
        identifying
        kmin (int): the minimum acceptable number of combinations
        logger: custom logging function

    Returns:
        (dataframe): the anonymised dataframe
    '''
    # Only one strategy exists for now; this is the hook where alternative
    # anonymisation methods would be selected from a list of options.
    return simple_kanonymity(start_dataframe, nums, kmin, logger)
def simple_kanonymity(start_dataframe, nums, kmin, logger):
    '''
    Performs simple k anonymity.

    Every combination of the identifying columns that occurs fewer than
    ``kmin`` times is padded with fabricated rows: template rows are sampled
    from the existing data and their identifying columns are overwritten
    with the under-represented combination.

    Parameters:
        argument1 (dataframe): the data that we want to anonymise
        argument2 (array): the columns of the dataframe that when combined become
        identifying
        argument3 (int): the minimum acceptable number of combinations
        logger: custom logging function
    Returns:
        (dataframe): the anonymised dataframe with no special inteligence involved
        in creating the extra rows
    '''
    # Local import: the module does not import pandas at top level and the
    # input frame's own methods were sufficient before this fix.
    import pandas as pd

    logger.info('Performing simplistic K-anonymity')
    # getting a count of the identifying column combination
    combination_counts = start_dataframe.groupby(nums).size()
    logger.info('There are : ' + str(len(combination_counts.index)) +
                ' identifying combinations')
    # keeping those with fewer entries than the minimum needed
    in_need_of_expansion = combination_counts[combination_counts < kmin]
    logger.info('Only ' + str(len(in_need_of_expansion.index)) +
                ' are below the ' + str(kmin) + ' kmin limit')
    # collect the original frame plus one block of fabricated rows per
    # under-represented combination, then concatenate once at the end
    # (DataFrame.append was deprecated in pandas 1.4 and removed in 2.0)
    new_row_blocks = [start_dataframe]
    for index_number, identifying_combination_count in \
            enumerate(in_need_of_expansion):
        # determine number of false entries needed
        false_entries = kmin - identifying_combination_count
        # sample template rows; resample with replacement when more rows are
        # needed than exist in the original frame (plain sample would raise)
        rows = start_dataframe.sample(
            n=false_entries,
            replace=false_entries > len(start_dataframe))
        # overwrite every identifying column with the masked combination
        for column in range(len(in_need_of_expansion.index.names)):
            rows[in_need_of_expansion.index.names[column]] = \
                in_need_of_expansion.index[index_number][column]
        new_row_blocks.append(rows)
    if len(new_row_blocks) == 1:
        # nothing needed padding; return the input untouched (keeps index)
        return start_dataframe
    return pd.concat(new_row_blocks, ignore_index=True)
if __name__ == '__main__':
    # NOTE(review): master() requires four arguments, so running this module
    # directly raises a TypeError -- this entry point looks like a stub.
    # TODO confirm intended CLI usage with the author.
    master()
| 35.986667
| 103
| 0.722119
|
acff44bdf32f665eb0b8695b5f5df971f3203ee2
| 1,002
|
py
|
Python
|
mysite/mysite/urls.py
|
ifackerx/KMITL-Market
|
9790e7039c46bb4df030059bf67504258c9906b7
|
[
"MIT"
] | 1
|
2019-04-28T15:55:58.000Z
|
2019-04-28T15:55:58.000Z
|
mysite/mysite/urls.py
|
ifackerx/KMITL-Market
|
9790e7039c46bb4df030059bf67504258c9906b7
|
[
"MIT"
] | 4
|
2020-04-30T02:49:55.000Z
|
2022-02-12T09:07:19.000Z
|
mysite/mysite/urls.py
|
ifackerx/KMITL-Market
|
9790e7039c46bb4df030059bf67504258c9906b7
|
[
"MIT"
] | null | null | null |
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from . import settings
# URL routing table for the project: admin site plus the polls app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('polls/', include('polls.urls'))
]
# Serve user-uploaded media through Django only while DEBUG is on;
# in production the web server should handle MEDIA_URL itself.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
| 32.322581
| 77
| 0.694611
|
acff4555853ca27e1a7af609ae1dea0121b63777
| 3,314
|
py
|
Python
|
src/scripts/model_transformation/treelite_compile/compile_treelite.py
|
microsoft/lightgbm-benchmark
|
286668d698d9d166857f924ecb775d5de224d489
|
[
"MIT"
] | 13
|
2021-08-20T01:03:51.000Z
|
2022-02-12T05:34:46.000Z
|
src/scripts/model_transformation/treelite_compile/compile_treelite.py
|
microsoft/lightgbm-benchmark
|
286668d698d9d166857f924ecb775d5de224d489
|
[
"MIT"
] | 199
|
2021-08-21T21:18:53.000Z
|
2022-03-27T23:08:44.000Z
|
src/scripts/model_transformation/treelite_compile/compile_treelite.py
|
microsoft/lightgbm-benchmark
|
286668d698d9d166857f924ecb775d5de224d489
|
[
"MIT"
] | 4
|
2021-08-20T06:53:26.000Z
|
2022-01-24T22:22:39.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
TreeLite/Python inferencing script
"""
import os
import sys
import argparse
import logging
import numpy
from distutils.util import strtobool
import pandas as pd
import treelite, treelite_runtime
# Add the right path to PYTHONPATH
# so that you can import from common.*
COMMON_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
if COMMON_ROOT not in sys.path:
print(f"Adding {COMMON_ROOT} to PYTHONPATH")
sys.path.append(str(COMMON_ROOT))
# useful imports from common
from common.components import RunnableScript
from common.io import input_file_path
class TreeLightCompileScript(RunnableScript):
    """Runnable component that compiles a tree model into a native shared
    library using Treelite."""

    def __init__(self):
        # Register this script as the 'compile' task of the treelite_python
        # framework so the benchmark harness can identify it.
        super().__init__(
            task='compile',
            framework='treelite_python',
            framework_version=treelite.__version__
        )

    @classmethod
    def get_arg_parser(cls, parser=None):
        """Adds component/module arguments to a given argument parser.

        Args:
            parser (argparse.ArgumentParser): an argument parser instance

        Returns:
            ArgumentParser: the argument parser instance

        Notes:
            if parser is None, creates a new parser instance
        """
        # generic arguments shared by every runnable script
        parser = RunnableScript.get_arg_parser(parser)

        input_group = parser.add_argument_group("Input Data")
        input_group.add_argument(
            "--model",
            required=False, type=input_file_path, help="Exported model location (file path)")

        treelite_group = parser.add_argument_group("Treelite parameters")
        treelite_group.add_argument(
            "--model_format",
            required=False, default="lightgbm", type=str, help="format of the input --model")
        treelite_group.add_argument(
            "--so_path",
            required=False, default="./mymodel.so", type=str, help="full path to the saved model")
        treelite_group.add_argument(
            "--toolchain",
            required=False, default="gcc", type=str, help="toolchain for compiling model")

        return parser

    def run(self, args, logger, metrics_logger, unknown_args):
        """Run script with arguments (the core of the component)

        Args:
            args (argparse.namespace): command line arguments provided to script
            logger (logging.getLogger() for this script)
            metrics_logger (common.metrics.MetricLogger)
            unknown_args (list[str]): list of arguments not recognized during argparse
        """
        logger.info(f"Converting model to Treelite")
        with metrics_logger.log_time_block("model_compilation"):
            # deserialize the input model in the requested format
            loaded_model = treelite.Model.load(
                args.model,
                model_format=args.model_format
            )
            # compile into a shared object; 16 translation units lets the
            # toolchain compile pieces of the model in parallel
            loaded_model.export_lib(
                toolchain=args.toolchain,
                libpath=args.so_path,
                verbose=True,
                params={'parallel_comp': 16}
            )
def get_arg_parser(parser=None):
    """ To ensure compatibility with shrike unit tests """
    # Module-level alias delegating to the class-level parser builder.
    return TreeLightCompileScript.get_arg_parser(parser)
def main(cli_args=None):
    """ To ensure compatibility with shrike unit tests """
    # Module-level alias delegating to the framework entry point.
    TreeLightCompileScript.main(cli_args)
if __name__ == "__main__":
main()
| 32.811881
| 98
| 0.658117
|
acff45fad21f2621a631d3ec5d9fdbfc9b6a3a8b
| 1,996
|
py
|
Python
|
modin/pandas/iterator.py
|
ienkovich/modin
|
b7e1188f7ba01e3a313996e092a65b63f6b76fa0
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-05-19T04:01:17.000Z
|
2021-05-19T04:01:17.000Z
|
modin/pandas/iterator.py
|
ienkovich/modin
|
b7e1188f7ba01e3a313996e092a65b63f6b76fa0
|
[
"ECL-2.0",
"Apache-2.0"
] | 57
|
2021-01-22T15:52:03.000Z
|
2021-06-12T18:22:04.000Z
|
modin/pandas/iterator.py
|
ienkovich/modin
|
b7e1188f7ba01e3a313996e092a65b63f6b76fa0
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-01-29T12:12:42.000Z
|
2022-01-29T12:12:42.000Z
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Place to define the Modin iterator."""
from collections.abc import Iterator
class PartitionIterator(Iterator):
    """Iterator on partitioned data."""

    def __init__(self, df, axis, func):
        """
        Construct an iterator over one axis of partitioned data.

        TODO: add types.

        Parameters
        ----------
        df: DataFrame
            The dataframe to iterate over.
        axis:
            Axis to iterate over (truthy -> columns, falsy -> rows).
        func:
            The function to get inner iterables from each partition.
        """
        self.df = df
        self.axis = axis
        self.func = func
        if axis:
            # one (slice(None), col) positional key per column
            positions = range(len(self.df.columns))
            self.index_iter = zip(
                iter(slice(None) for _ in positions),
                positions,
            )
        else:
            # one (row, slice(None)) positional key per row
            positions = range(len(self.df.index))
            self.index_iter = zip(
                positions,
                iter(slice(None) for _ in positions),
            )

    def __iter__(self):
        """Implement iterator interface."""
        return self

    def __next__(self):
        """Implement iterator interface."""
        # StopIteration from the exhausted key iterator propagates naturally.
        selection = next(self.index_iter)
        partition = self.df.iloc[selection]
        return self.func(partition)
| 32.721311
| 87
| 0.623246
|
acff46a3d202b3a0a7a1677c6ee4943395687de0
| 57
|
py
|
Python
|
src/Calculator/Division.py
|
aravetirahul/Statistical-Calculator--master
|
1d36da730cd00677e4d5d794f110f7dd7bf0e783
|
[
"MIT"
] | null | null | null |
src/Calculator/Division.py
|
aravetirahul/Statistical-Calculator--master
|
1d36da730cd00677e4d5d794f110f7dd7bf0e783
|
[
"MIT"
] | null | null | null |
src/Calculator/Division.py
|
aravetirahul/Statistical-Calculator--master
|
1d36da730cd00677e4d5d794f110f7dd7bf0e783
|
[
"MIT"
] | 3
|
2020-03-22T01:56:45.000Z
|
2020-03-22T20:20:48.000Z
|
def div(a, b):
    """Divide a by b as floats and round the quotient to 2 decimal places."""
    quotient = float(a) / float(b)
    return round(quotient, 2)
| 28.5
| 42
| 0.561404
|
acff46b6721d24ffb23b79eeb11d631d2357ee1b
| 1,808
|
py
|
Python
|
chrome/test/functional/test_pyauto.py
|
Crystalnix/BitPop
|
1fae4ecfb965e163f6ce154b3988b3181678742a
|
[
"BSD-3-Clause"
] | 7
|
2015-05-20T22:41:35.000Z
|
2021-11-18T19:07:59.000Z
|
chrome/test/functional/test_pyauto.py
|
Crystalnix/BitPop
|
1fae4ecfb965e163f6ce154b3988b3181678742a
|
[
"BSD-3-Clause"
] | 1
|
2015-02-02T06:55:08.000Z
|
2016-01-20T06:11:59.000Z
|
chrome/test/functional/test_pyauto.py
|
Crystalnix/BitPop
|
1fae4ecfb965e163f6ce154b3988b3181678742a
|
[
"BSD-3-Clause"
] | 2
|
2015-12-08T00:37:41.000Z
|
2017-04-06T05:34:05.000Z
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import pyauto_functional # Must be imported before pyauto
import pyauto
import pyauto_errors
class PyAutoTest(pyauto.PyUITest):
  """Test functionality of the PyAuto framework."""

  # Arbitrary, harmless command-line switches used only to verify that
  # custom flags actually reach the Chrome command line.
  _EXTRA_CHROME_FLAGS = [
    '--scooby-doo=123',
    '--donald-duck=cool',
    '--super-mario',
    '--marvin-the-martian',
  ]

  def ExtraChromeFlags(self):
    """Ensures Chrome is launched with some custom flags.

    Overrides the default list of extra flags passed to Chrome. See
    ExtraChromeFlags() in pyauto.py.
    """
    return pyauto.PyUITest.ExtraChromeFlags(self) + self._EXTRA_CHROME_FLAGS

  def testSetCustomChromeFlags(self):
    """Ensures that Chrome can be launched with custom flags."""
    self.NavigateToURL('about://version')
    # about:version shows the full command line, so each injected flag
    # should be found exactly once on the page.
    for flag in self._EXTRA_CHROME_FLAGS:
      self.assertEqual(self.FindInPage(flag)['match_count'], 1,
                       msg='Missing expected Chrome flag "%s"' % flag)

  def testCallOnInvalidWindow(self):
    """Verify that exception is raised when a browser is missing/invalid."""
    self.assertEqual(1, self.GetBrowserWindowCount())
    # Only window 0 exists, so windex=1 must be rejected by the JSON interface.
    self.assertRaises(
        pyauto_errors.JSONInterfaceError,
        lambda: self.FindInPage('some text', windex=1))  # invalid window

  def testJSONInterfaceTimeout(self):
    """Verify that an exception is raised when the JSON interface times out."""
    self.ClearEventQueue()
    self.AddDomEventObserver('foo')
    # No matching DOM event is ever raised, so GetNextEvent must time out.
    self.assertRaises(
        pyauto_errors.JSONInterfaceError,
        lambda: self.GetNextEvent(timeout=2000))  # event queue is empty
if __name__ == '__main__':
pyauto_functional.Main()
| 31.719298
| 79
| 0.709624
|
acff476d0f8cd29981ddda471d73611e1f7f3d15
| 784
|
py
|
Python
|
demo/constant.py
|
Lm0079/SpeciesClassification
|
0487af0e59f2c9d759711a5b5bf7e9f3167ef968
|
[
"MIT"
] | null | null | null |
demo/constant.py
|
Lm0079/SpeciesClassification
|
0487af0e59f2c9d759711a5b5bf7e9f3167ef968
|
[
"MIT"
] | null | null | null |
demo/constant.py
|
Lm0079/SpeciesClassification
|
0487af0e59f2c9d759711a5b5bf7e9f3167ef968
|
[
"MIT"
] | 1
|
2020-07-30T12:58:18.000Z
|
2020-07-30T12:58:18.000Z
|
# Top-level configuration section names.
COMMON = 'COMMON'
SEARCH = 'SEARCH'

# Keys for filesystem paths and URLs used by the demo.
SEARCH_INDEX_PATH = "search_index_path"
ROOT_PATH = 'root_path'
SAMPLE_PATH = 'sample_path'
SAMPLE_URL = 'sample_url'
SEARCH_IMAGE_PATH = 'search_image_path'
TAXA_FILE_PATH = 'taxa_file_path'
SEARCH_IMG_REL_URL = 'search_img_rel_url'

# Keys describing a species search result entry.
SPECIES_COMMON_NAME = 'species_common_name'
SPECIES_SCIENTIFIC_NAME = 'species_scientific_name'
PARENT_NAME_USAGE_ID = 'parent_name_usage_id'
GENUS = 'genus'
GENUS_COMMON_NAME = 'genus_common_name'
PATH = 'path'
IMAGE_FOUND = 'image_found'
SEARCH_RANKING = 'search_ranking'

# Column names as they appear in the taxa CSV file.
CSV_VERNACULAR_NAME = 'vernacularName'
CSV_SCIENTIFIC_NAME = 'scientificName'
CSV_TAXON_RANK = 'taxonRank'
CSV_TAXON_ID = 'taxonID'
CSV_PARENT_NAME_USAGE_ID = 'parentNameUsageID'

# Azure storage credential keys.
ACCOUNT_NAME = "account_name"
ACCOUNT_KEY = "account_key"
| 28
| 51
| 0.8125
|
acff49128bc31821bf625cedfa4666fcd7eedfe1
| 3,530
|
py
|
Python
|
observatorio/settings.py
|
bispojr/observatorio-ufj-covid19
|
8667fae1367b95a7dfa8558fbac3b1b0b708af8d
|
[
"MIT"
] | 3
|
2020-04-02T21:59:19.000Z
|
2020-12-03T12:37:26.000Z
|
observatorio/settings.py
|
bispojr/observatorio-ufj-covid19
|
8667fae1367b95a7dfa8558fbac3b1b0b708af8d
|
[
"MIT"
] | 68
|
2020-03-28T22:40:08.000Z
|
2020-07-08T18:04:07.000Z
|
observatorio/settings.py
|
bispojr/observatorio-ufj-covid19
|
8667fae1367b95a7dfa8558fbac3b1b0b708af8d
|
[
"MIT"
] | 5
|
2020-03-28T21:35:30.000Z
|
2020-06-10T01:28:14.000Z
|
import os
from decouple import config
from dj_database_url import parse as dburl

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)

# Endpoint of the upstream brasil.io COVID-19 dataset.
URL_BRASIL_IO = "https://brasil.io/api/dataset/covid19/caso_full/data/"

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')

ALLOWED_HOSTS = ["deolhonocorona.herokuapp.com", "127.0.0.1", "www.deolhonocorona.com", "deolhonocorona.com"]

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    "apps.core",
    "django_nose"
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'observatorio.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ["templates"],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'observatorio.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#     }
# }
# config database heroku
# Falls back to a local SQLite file when DATABASE_URL is not set.
default_dburl = 'sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')
DATABASES = { 'default': config('DATABASE_URL', default=default_dburl, cast=dburl), }

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = "pt-br"
TIME_ZONE = "America/Sao_Paulo"
USE_I18N = True
USE_L10N = True
# NOTE(review): timezone support is disabled, so datetimes are stored naive
# in America/Sao_Paulo local time -- confirm this is intentional.
USE_TZ = False

# Static files (CSS, JavaScript, Images)
# "static" is a name relative to paths
STATIC_URL = '/static/'
# dir used to production, dont touch here
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# dir to develop, you can touch here
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static/"),
)

TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
| 27.578125
| 109
| 0.7
|
acff497489642ab07727f0c0f5b8b19d816d4b46
| 208,087
|
py
|
Python
|
catimages.py
|
Botomatik/JackBot
|
58651d8b5a5bcead2a2eb79849019cb4f972b7cd
|
[
"MIT"
] | null | null | null |
catimages.py
|
Botomatik/JackBot
|
58651d8b5a5bcead2a2eb79849019cb4f972b7cd
|
[
"MIT"
] | null | null | null |
catimages.py
|
Botomatik/JackBot
|
58651d8b5a5bcead2a2eb79849019cb4f972b7cd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Image by content categorization derived from 'checkimages.py'.
Script to check uncategorized files. This script checks if a file
has some content that allows to assign it to a category.
This script runs on commons only. It needs also external libraries
(see imports and comments there) and additional configuration/data
files in order to run properly. Most of them can be checked-out at:
http://svn.toolserver.org/svnroot/drtrigon/
(some code might get compiled on-the-fly, so a GNU compiler along
with library header files is needed too)
This script understands the following command-line arguments:
-cat[:#] Use a category as recursive generator
(if no given 'Category:Media_needing_categories' is used)
-start[:#] Start after File:[:#] or if no file given start from top
(instead of resuming last run).
-limit The number of images to check (default: 80)
-noguesses If given, this option will disable all guesses (which are
less reliable than true searches).
-single:# Run for one (any) single page only.
-train Train classifiers on good (homegenous) categories.
X-sendemail Send an email after tagging.
X-untagged[:#] Use daniel's tool as generator:
X http://toolserver.org/~daniel/WikiSense/UntaggedImages.php
"""
#
# (C) Kyle/Orgullomoore, 2006-2007 (newimage.py)
# (C) Pywikipedia team, 2007-2011 (checkimages.py)
# (C) DrTrigon, 2012
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id: acff497489642ab07727f0c0f5b8b19d816d4b46 $'
#
# python default packages
import re, urllib2, os, locale, sys, datetime, math, shutil, mimetypes, shelve
import StringIO, json
from subprocess import Popen, PIPE
try:
import Image # classic 'PIL'
except ImportError:
from PIL import Image # new 'PIL' fork 'Pillow' (fedora 19)
import imghdr
#import ImageFilter
scriptdir = os.path.dirname(sys.argv[0])
if not os.path.isabs(scriptdir):
scriptdir = os.path.abspath(os.path.join(os.curdir, scriptdir))
# additional python packages (non-default but common)
sys.exc_clear()
try:
import numpy as np
from scipy import ndimage, fftpack#, signal
import cv
# TS: nonofficial cv2.so backport of the testing-version of
# python-opencv because of missing build-host, done by DaB
sys.path.append('/usr/local/lib/python2.6/')
import cv2
sys.path.remove('/usr/local/lib/python2.6/')
import pyexiv2
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import gtk # ignore warning: "GtkWarning: could not open display"
import rsvg # gnome-python2-rsvg (binding to librsvg)
import cairo
import magic # python-magic (binding to libmagic)
except:
# either raise the ImportError later or skip it
pass
# pywikipedia framework python packages
import wikipedia as pywikibot
import pagegenerators, catlib
import checkimages
import externals # allow import from externals
# additional python packages (more exotic and problematic ones)
# modules needing compilation are imported later on request:
# (see https://jira.toolserver.org/browse/TS-1452)
# e.g. opencv, jseg, slic, pydmtx, zbar, (pyml or equivalent)
# binaries: exiftool, pdftotext/pdfimages (poppler), ffprobe (ffmpeg),
# convert/identify (ImageMagick), (ocropus)
# TODO:
# (pdfminer not used anymore/at the moment...)
# python-djvulibre or python-djvu for djvu support
externals.check_setup('colormath') # check for and install needed
externals.check_setup('jseg') # 'externals' modules
externals.check_setup('jseg/jpeg-6b') #
#externals.check_setup('_mlpy') #
externals.check_setup('_music21') #
externals.check_setup('opencv/haarcascades') #
externals.check_setup('pydmtx') # <<< !!! test OS package management here !!!
externals.check_setup('py_w3c') #
externals.check_setup('_zbar') #
import pycolorname
#import _mlpy as mlpy
from colormath.color_objects import RGBColor
from py_w3c.validators.html.validator import HTMLValidator, ValidationFault
#from pdfminer import pdfparser, pdfinterp, pdfdevice, converter, cmapdb, layout
#externals.check_setup('_ocropus')
locale.setlocale(locale.LC_ALL, '')
###############################################################################
# <--------------------------- Change only below! --------------------------->#
###############################################################################
# NOTE: in the messages used by the Bot if you put __botnick__ in the text, it
# will automatically replaced with the bot's nickname.
# Add your project (in alphabetical order) if you want that the bot start
project_inserted = [u'commons',]
# Ok, that's all. What is below, is the rest of code, now the code is fixed and it will run correctly in your project.
################################################################################
# <--------------------------- Change only above! ---------------------------> #
################################################################################
tmpl_FileContentsByBot = u"""}}
{{FileContentsByBot
| botName = ~~~
|"""
# this list is auto-generated during bot run (may be add notifcation about NEW templates)
#tmpl_available_spec = [ u'Properties', u'ColorRegions', u'Faces', u'ColorAverage' ]
tmpl_available_spec = [] # auto-generated
# global
useGuesses = True # Use guesses which are less reliable than true searches
# all detection and recognition methods - bindings to other classes, modules and libs
class _UnknownFile(object):
    """Fallback handler for files whose MIME type has no dedicated handler.

    Collects generic properties and EXIF metadata only; feature extraction
    is a warning no-op. Subclasses (e.g. _JpegFile) override pieces of this
    to provide real detection for their formats.
    """

    def __init__(self, file_name, file_mime, *args, **kwargs):
        self.file_name = file_name
        self.file_mime = file_mime
        self.image_size = (None, None)

        # available file properties and metadata
        self._properties = { 'Properties': [{'Format': u'-', 'Length': -1}],
                             'Metadata': [], }
        # available feature to extract
        self._features = { 'ColorAverage': [],
                           'ColorRegions': [],
                           'Faces': [],
                           'People': [],
                           'OpticalCodes': [],
                           'Chessboard': [],
                           'History': [],
                           'Text': [],
                           'Streams': [],
                           #'Audio': [],
                           'Legs': [],
                           'Hands': [],
                           'Torsos': [],
                           'Ears': [],
                           'Eyes': [],
                           'Automobiles': [],
                           'Classify': [], }

    def __enter__(self):
        # context-manager support; nothing to acquire in the generic case
        return self

    def __exit__(self, type, value, traceback):
        # nothing to release here (subclasses remove their temp files)
        pass

    def getProperties(self):
        self._detect_HeaderAndMetadata()    # Metadata
        self._detect_Properties()           # Properties
        return self._properties

    def getFeatures(self):
        # generic files have no extractors; warn and return the empty sets
        pywikibot.warning(u"File format '%s/%s' not supported (yet)!" % tuple(self.file_mime[:2]))
        return self._features

    def _detect_HeaderAndMetadata(self):
        # check/look into the file by midnight commander (mc)
        # use exif as first hint - in fact gives also image-size, streams, ...
        exif = self._util_get_DataTags_EXIF()

        #print exif
        misc = []
        misc += [exif['Output_extension']] if 'Output_extension' in exif else []
        misc += [exif['DescProducer']] if 'DescProducer' in exif else []
        misc += [exif['DescCreator']] if 'DescCreator' in exif else []
        result = { 'Software': u'%s'%exif['Software'] if 'Software' in exif else u'-',
                   'Desc': u'%s'%exif['Desc'] if 'Desc' in exif else u'-',
                   'Comment': u'%s'%exif['Comment'] if 'Comment' in exif else u'-',
                   'Producer': u'%s'%exif['Producer'] if 'Producer' in exif else u'-',
                   'Misc': u'\n'.join(misc) if misc else u'-',}
        #'Output_Extension': exif['Output_extension'] if 'Output_extension' in exif else u'-',
        #'DescProducer': exif['DescProducer'] if 'DescProducer' in exif else u'-',
        #'DescCreator': exif['DescCreator'] if 'DescCreator' in exif else u'-',
        #'Comments': exif['Comments'] if 'Comments' in exif else u'-',
        #'WorkDesc': exif['WorkDescription'] if 'WorkDescription' in exif else u'-',
        ##'Dimensions': tuple(map(int, exif['ImageSize'].split(u'x'))),}
        #'Dimensions': tuple(exif['ImageSize'].split(u'x')) if 'ImageSize' in exif else (None, None),}
        #'Mode': exif['ColorType'], }

        ## https://pypi.python.org/pypi/hachoir-metadata (needs 'core' and 'parser')
        #
        #from hachoir_core.error import HachoirError
        #from hachoir_core.stream import InputStreamError
        #from hachoir_parser import createParser
        #import hachoir_core.config as hachoir_config
        #
        #from hachoir_metadata import extractMetadata
        #
        #hachoir_config.debug = True
        #hachoir_config.verbose = True
        #hachoir_config.quiet = True
        #
        ## Create parser
        #try:
        #    parser = createParser(self.file_name.decode('utf-8'),
        #                          real_filename=self.file_name.encode('utf-8'),
        #                          tags=None)
        #    #print [val for val in enumerate(parser.createFields())]
        #    desc = parser.description
        #    ptags = parser.getParserTags()
        #except (InputStreamError, AttributeError):
        #    desc = u'-'
        #    ptags = {}
        #
        ## Extract metadata
        #try:
        #    # quality: 0.0 fastest, 1.0 best, and default is 0.5
        #    metadata = extractMetadata(parser, 0.5)
        #    #mtags = dict([(key, metadata.getValues(key))
        #    mtags = dict([(key, metadata.getValues(key))    # get, getItem, getItems, getText
        #                  for key in metadata._Metadata__data.keys()#])
        #                  if metadata.getValues(key)])
        #except (HachoirError, AttributeError):
        #    mtags = {}
        #
        ##result = {'parser_desc': desc, 'parserdata': ptags, 'metadata': mtags}
        ##print result
        #print {'parser_desc': desc, 'parserdata': ptags, 'metadata': mtags}
        #
        ### Display metadatas on stdout
        ##text = metadata.exportPlaintext(priority=None, human=False)
        ##if not text:
        ##    text = [u"(no metadata, priority may be too small, try priority=999)"]
        ##print u'\n'.join(text)

        self._properties['Metadata'] = [result]
        #print self._properties['Metadata']
        return

    def _detect_Properties(self):
        # get mime-type file-size, ...
        # generic fallback: no format-specific property extraction available
        pass

    def _util_get_DataTags_EXIF(self):
        # http://tilloy.net/dev/pyexiv2/tutorial.html
        # (is UNFORTUNATELY NOT ABLE to handle all tags, e.g. 'FacesDetected', ...)

        # results are cached on the instance; repeated calls are cheap
        if hasattr(self, '_buffer_EXIF'):
            return self._buffer_EXIF

        res = {}
        enable_recovery()   # enable recovery from hard crash
        try:
            # new pyexiv2 API (ImageMetadata) vs. legacy API (Image)
            if hasattr(pyexiv2, 'ImageMetadata'):
                metadata = pyexiv2.ImageMetadata(self.file_name)
                metadata.read()

                for key in metadata.exif_keys:
                    res[key] = metadata[key]

                for key in metadata.iptc_keys:
                    res[key] = metadata[key]

                for key in metadata.xmp_keys:
                    res[key] = metadata[key]
            else:
                image = pyexiv2.Image(self.file_name)
                image.readMetadata()

                for key in image.exifKeys():
                    res[key] = image[key]

                for key in image.iptcKeys():
                    res[key] = image[key]

                #for key in image.xmpKeys():
                #    res[key] = image[key]
        except IOError:
            pass
        except RuntimeError:
            pass
        disable_recovery()  # disable since everything worked out fine

        # http://www.sno.phy.queensu.ca/~phil/exiftool/
        # MIGHT BE BETTER TO USE AS PYTHON MODULE; either by wrapper or perlmodule:
        # http://search.cpan.org/~gaas/pyperl-1.0/perlmodule.pod
        # (or use C++ with embbedded perl to write a python module)
        # NOTE(review): file_name is interpolated unquoted into a shell
        # command -- shell injection risk if the name is attacker-controlled;
        # confirm upstream sanitization or switch to an argument list.
        data = Popen("exiftool -j %s" % self.file_name,
                     shell=True, stdout=PIPE).stdout.read()
        if not data:
            raise ImportError("exiftool not found!")
        try: # work-a-round for badly encoded exif data (from pywikibot/comms/http.py)
            data = unicode(data, 'utf-8', errors = 'strict')
        except UnicodeDecodeError:
            data = unicode(data, 'utf-8', errors = 'replace')
        #res = {}
        data = re.sub("(?<!\")\(Binary data (?P<size>\d*) bytes\)", "\"(Binary data \g<size> bytes)\"", data)    # work-a-round some issue
        for item in json.loads(data):
            res.update( item )
        #print res
        self._buffer_EXIF = res

        return self._buffer_EXIF
class _JpegFile(_UnknownFile):
# for '_detect_Trained'
cascade_files = [(u'Legs', 'haarcascade_lowerbody.xml'),
(u'Torsos', 'haarcascade_upperbody.xml'),
(u'Ears', 'haarcascade_mcs_leftear.xml'),
(u'Ears', 'haarcascade_mcs_rightear.xml'),
(u'Eyes', 'haarcascade_lefteye_2splits.xml'), # (http://yushiqi.cn/research/eyedetection)
(u'Eyes', 'haarcascade_righteye_2splits.xml'), # (http://yushiqi.cn/research/eyedetection)
#externals/opencv/haarcascades/haarcascade_mcs_lefteye.xml
#externals/opencv/haarcascades/haarcascade_mcs_righteye.xml
# (others include indifferent (left and/or right) and pair)
(u'Automobiles', 'cars3.xml'), # http://www.youtube.com/watch?v=c4LobbqeKZc
(u'Hands', '1256617233-2-haarcascade-hand.xml', 300.),] # http://www.andol.info/
# ('Hands' does not behave very well, in fact it detects any kind of skin and other things...)
#(u'Aeroplanes', 'haarcascade_aeroplane.xml'),] # e.g. for 'Category:Unidentified aircraft'
def __init__(self, file_name, file_mime, *args, **kwargs):
_UnknownFile.__init__(self, file_name, file_mime)
self.image_filename = os.path.split(self.file_name)[-1]
self.image_path = self.file_name
self.image_path_JPEG = self.image_path + '.jpg'
self._convert()
def __exit__(self, type, value, traceback):
#if os.path.exists(self.image_path):
# os.remove( self.image_path )
if os.path.exists(self.image_path_JPEG):
os.remove( self.image_path_JPEG )
#image_path_new = self.image_path_JPEG.replace(u"cache/", u"cache/0_DETECTED_")
#if os.path.exists(image_path_new):
# os.remove( image_path_new )
def getFeatures(self):
# Faces (extract EXIF data)
self._detect_Faces_EXIF()
# Faces and eyes (opencv pre-trained haar)
self._detect_Faces()
# Face via Landmark(s)
# SWITCHED OFF; needs lots of libraries and disk space for minor improvement
#self._detect_FaceLandmark_xBOB()
# exclude duplicates (CV and EXIF)
faces = [item['Position'] for item in self._features['Faces']]
for i in self._util_merge_Regions(faces)[1]:
del self._features['Faces'][i]
# Segments and colors
self._detect_SegmentColors()
# Average color
self._detect_AverageColor()
# People/Pedestrian (opencv pre-trained hog and haarcascade)
self._detect_People()
# Geometric object (opencv hough line, circle, edges, corner, ...)
self._detect_Geometry()
# general (opencv pre-trained, third-party and self-trained haar
# and cascade) classification
# http://www.computer-vision-software.com/blog/2009/11/faq-opencv-haartraining/
for cf in self.cascade_files:
self._detect_Trained(*cf)
# barcode and Data Matrix recognition (libdmtx/pydmtx, zbar, gocr?)
self._recognize_OpticalCodes()
# Chessboard (opencv reference detector)
self._detect_Chessboard()
# general (self-trained) detection WITH classification
# BoW: uses feature detection (SIFT, SURF, ...) AND classification (SVM, ...)
# self._detectclassify_ObjectAll()
# Wavelet: uses wavelet transformation AND classification (machine learning)
# self._detectclassify_ObjectAll_PYWT()
# general file EXIF history information
self._detect_History()
return self._features
# supports a lot of different file types thanks to PIL
def _convert(self):
try:
im = Image.open(self.image_path) # might be png, gif etc, for instance
#im.thumbnail(size, Image.ANTIALIAS) # size is 640x480
im.convert('RGB').save(self.image_path_JPEG, "JPEG")
self.image_size = im.size
except IOError, e:
if 'image file is truncated' in str(e):
# im object has changed due to exception raised
im.convert('RGB').save(self.image_path_JPEG, "JPEG")
self.image_size = im.size
else:
try:
# since opencv might still work, try this as fall-back
img = cv2.imread( self.image_path, cv.CV_LOAD_IMAGE_COLOR )
cv2.imwrite(self.image_path_JPEG, img)
self.image_size = (img.shape[1], img.shape[0])
except:
if os.path.exists(self.image_path_JPEG):
os.remove(self.image_path_JPEG)
self.image_path_JPEG = self.image_path
except:
self.image_path_JPEG = self.image_path
    # FULL TIFF support (e.g. group4)
    # http://code.google.com/p/pylibtiff/
    # MIME: 'image/jpeg; charset=binary', ...
    def _detect_Properties(self):
        """Retrieve as much file property info possible, especially the same
        as commons does in order to compare if those libraries (ImageMagick,
        ...) are buggy (thus explicitely use other software for independence).

        Updates self._properties['Properties'][0] in place with Format, Mode,
        Palette, page count ('Length'), Dimensions, Filesize and MIME type.
        """
        result = {'Format': u'-', 'Length': -1}
        try:
            i = Image.open(self.image_path)
        except IOError:
            pywikibot.warning(u'unknown file type [_JpegFile]')
            return
        # count the number of frames/pages by seeking until EOFError
        # http://mail.python.org/pipermail/image-sig/1999-May/000740.html
        pc=0 # count number of pages
        while True:
            try:
                i.seek(pc)
            except EOFError:
                break
            pc+=1
        i.seek(0) # restore default
        # http://grokbase.com/t/python/image-sig/082psaxt6k/embedded-icc-profiles
        # python-lcms (littlecms) may be freeimage library
        #icc = i.app['APP2'] # jpeg
        #icc = i.tag[34675] # tiff
        #icc = re.sub('[^%s]'%string.printable, ' ', icc)
        ## more image formats and more post-processing needed...
        #self.image_size = i.size
        result.update({ #'bands': i.getbands(),
                        #'bbox': i.getbbox(),
                        'Format': i.format,
                        'Mode': i.mode,
                        #'info': i.info,
                        #'stat': os.stat(self.image_path),
                        'Palette': str(len(i.palette.palette)) if i.palette else u'-',
                        'Length': pc, # num. of pages
                        'Dimensions': self.image_size,
                        # NOTE(review): size is taken from self.file_name while the
                        # image is opened from self.image_path — confirm intended
                        'Filesize': os.path.getsize(self.file_name),
                        'MIME': u'%s/%s' % tuple(self.file_mime[:2]), })
        #self._properties['Properties'] = [result]
        self._properties['Properties'][0].update(result)
        return
    # .../opencv/samples/c/facedetect.cpp
    # http://opencv.willowgarage.com/documentation/python/genindex.html
    def _detect_Faces(self):
        """Converts an image to grayscale and prints the locations of any
        faces found.

        Runs frontal and profile haar cascades over a downscaled grayscale
        copy, merges the hits, then searches each face region for eyes,
        mouth, nose and ears with dedicated cascades. When one complete set
        of landmarks (two eyes, mouth, nose) is found, the 3D head pose is
        estimated via solvePnP against the "little girl" head model.
        Appends one dict per face to self._features['Faces'].
        """
        # http://python.pastebin.com/m76db1d6b
        # http://creatingwithcode.com/howto/face-detection-in-static-images-with-python/
        # http://opencv.willowgarage.com/documentation/python/objdetect_cascade_classification.html
        # http://opencv.willowgarage.com/wiki/FaceDetection
        # http://blog.jozilla.net/2008/06/27/fun-with-python-opencv-and-face-detection/
        # http://www.cognotics.com/opencv/servo_2007_series/part_4/index.html
        # https://code.ros.org/trac/opencv/browser/trunk/opencv_extra/testdata/gpu/haarcascade?rev=HEAD
        # load all cascade classifiers up-front; missing XML files abort early
        xml = os.path.join(scriptdir, 'externals/opencv/haarcascades/haarcascade_eye_tree_eyeglasses.xml')
        #xml = os.path.join(scriptdir, 'externals/opencv/haarcascades/haarcascade_eye.xml')
        if not os.path.exists(xml):
            raise IOError(u"No such file: '%s'" % xml)
        #nestedCascade = cv.Load(
        nestedCascade = cv2.CascadeClassifier(xml)
        # http://tutorial-haartraining.googlecode.com/svn/trunk/data/haarcascades/
        xml = os.path.join(scriptdir, 'externals/opencv/haarcascades/haarcascade_frontalface_alt.xml')
        # MAY BE USE 'haarcascade_frontalface_alt_tree.xml' ALSO / INSTEAD...?!!
        if not os.path.exists(xml):
            raise IOError(u"No such file: '%s'" % xml)
        #cascade = cv.Load(
        cascade = cv2.CascadeClassifier(xml)
        xml = os.path.join(scriptdir, 'externals/opencv/haarcascades/haarcascade_profileface.xml')
        if not os.path.exists(xml):
            raise IOError(u"No such file: '%s'" % xml)
        cascadeprofil = cv2.CascadeClassifier(xml)
        xml = os.path.join(scriptdir, 'externals/opencv/haarcascades/haarcascade_mcs_mouth.xml')
        if not os.path.exists(xml):
            raise IOError(u"No such file: '%s'" % xml)
        cascademouth = cv2.CascadeClassifier(xml)
        xml = os.path.join(scriptdir, 'externals/opencv/haarcascades/haarcascade_mcs_nose.xml')
        if not os.path.exists(xml):
            raise IOError(u"No such file: '%s'" % xml)
        cascadenose = cv2.CascadeClassifier(xml)
        xml = os.path.join(scriptdir, 'externals/opencv/haarcascades/haarcascade_lefteye_2splits.xml')
        if not os.path.exists(xml):
            raise IOError(u"No such file: '%s'" % xml)
        cascadelefteye = cv2.CascadeClassifier(xml) # (http://yushiqi.cn/research/eyedetection)
        xml = os.path.join(scriptdir, 'externals/opencv/haarcascades/haarcascade_righteye_2splits.xml')
        if not os.path.exists(xml):
            raise IOError(u"No such file: '%s'" % xml)
        cascaderighteye = cv2.CascadeClassifier(xml) # (http://yushiqi.cn/research/eyedetection)
        xml = os.path.join(scriptdir, 'externals/opencv/haarcascades/haarcascade_mcs_leftear.xml')
        if not os.path.exists(xml):
            raise IOError(u"No such file: '%s'" % xml)
        cascadeleftear = cv2.CascadeClassifier(xml)
        xml = os.path.join(scriptdir, 'externals/opencv/haarcascades/haarcascade_mcs_rightear.xml')
        if not os.path.exists(xml):
            raise IOError(u"No such file: '%s'" % xml)
        cascaderightear = cv2.CascadeClassifier(xml)
        scale = 1.
        # So, to find an object of an unknown size in the image the scan
        # procedure should be done several times at different scales.
        # http://opencv.itseez.com/modules/objdetect/doc/cascade_classification.html
        try:
            #image = cv.LoadImage(self.image_path)
            #img = cv2.imread( self.image_path, cv.CV_LOAD_IMAGE_COLOR )
            img = cv2.imread( self.image_path_JPEG, cv.CV_LOAD_IMAGE_COLOR )
            #image = cv.fromarray(img)
            # NOTE(review): 'img == None' relies on the old numpy comparison
            # behaviour; modern numpy would compare element-wise here
            if img == None:
                raise IOError
            # !!! the 'scale' here IS RELEVANT FOR THE DETECTION RATE;
            # how small and how many features are detected as faces (or eyes)
            scale = max([1., np.average(np.array(img.shape)[0:2]/500.)])
        except IOError:
            pywikibot.warning(u'unknown file type [_detect_Faces]')
            return
        except AttributeError:
            pywikibot.warning(u'unknown file type [_detect_Faces]')
            return
        #detectAndDraw( image, cascade, nestedCascade, scale );
        # http://nullege.com/codes/search/cv.CvtColor
        # downscale + grayscale + histogram equalization before detection
        #smallImg = cv.CreateImage( (cv.Round(img.shape[0]/scale), cv.Round(img.shape[1]/scale)), cv.CV_8UC1 )
        #smallImg = cv.fromarray(np.empty( (cv.Round(img.shape[0]/scale), cv.Round(img.shape[1]/scale)), dtype=np.uint8 ))
        smallImg = np.empty( (cv.Round(img.shape[1]/scale), cv.Round(img.shape[0]/scale)), dtype=np.uint8 )
        #cv.CvtColor( image, gray, cv.CV_BGR2GRAY )
        gray = cv2.cvtColor( img, cv.CV_BGR2GRAY )
        #cv.Resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR )
        smallImg = cv2.resize( gray, smallImg.shape, interpolation=cv2.INTER_LINEAR )
        #cv.EqualizeHist( smallImg, smallImg )
        smallImg = cv2.equalizeHist( smallImg )
        t = cv.GetTickCount()
        # frontal + profile face detection, merged into one region list
        faces = list(cascade.detectMultiScale( smallImg,
            1.1, 2, 0
            #|cv.CV_HAAR_FIND_BIGGEST_OBJECT
            #|cv.CV_HAAR_DO_ROUGH_SEARCH
            |cv.CV_HAAR_SCALE_IMAGE,
            (30, 30) ))
        #faces = cv.HaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
        #                             cv.CV_HAAR_DO_CANNY_PRUNING, (50,50))
        facesprofil = list(cascadeprofil.detectMultiScale( smallImg,
            1.1, 2, 0
            #|cv.CV_HAAR_FIND_BIGGEST_OBJECT
            #|cv.CV_HAAR_DO_ROUGH_SEARCH
            |cv.CV_HAAR_SCALE_IMAGE,
            (30, 30) ))
        #faces = self._util_merge_Regions(faces + facesprofil)[0]
        faces = self._util_merge_Regions(faces + facesprofil, overlap=True)[0]
        faces = np.array(faces)
        #if faces:
        #    self._drawRect(faces) #call to a python pil
        t = cv.GetTickCount() - t
        #print( "detection time = %g ms\n" % (t/(cv.GetTickFrequency()*1000.)) )
        #colors = [ (0,0,255),
        #           (0,128,255),
        #           (0,255,255),
        #           (0,255,0),
        #           (255,128,0),
        #           (255,255,0),
        #           (255,0,0),
        #           (255,0,255) ]
        result = []
        # per detected face: expand the box slightly, then probe sub-regions
        # for the individual landmarks (eyes in the upper part, mouth in the
        # lower part, nose in the centre, ears at the sides)
        for i, r in enumerate(faces):
            #color = colors[i%8]
            (rx, ry, rwidth, rheight) = r
            #cx = cv.Round((rx + rwidth*0.5)*scale)
            #cy = cv.Round((ry + rheight*0.5)*scale)
            #radius = cv.Round((rwidth + rheight)*0.25*scale)
            #cv2.circle( img, (cx, cy), radius, color, 3, 8, 0 )
            #if nestedCascade.empty():
            #    continue
            # Wilson, Fernandez: FACIAL FEATURE DETECTION USING HAAR CLASSIFIERS
            # http://nichol.as/papers/Wilson/Facial%20feature%20detection%20using%20Haar.pdf
            #dx, dy = cv.Round(rwidth*0.5), cv.Round(rheight*0.5)
            dx, dy = cv.Round(rwidth/8.), cv.Round(rheight/8.)
            (rx, ry, rwidth, rheight) = (max([rx-dx,0]), max([ry-dy,0]), min([rwidth+2*dx,img.shape[1]]), min([rheight+2*dy,img.shape[0]]))
            #smallImgROI = smallImg
            #print r, (rx, ry, rwidth, rheight)
            #smallImgROI = smallImg[ry:(ry+rheight),rx:(rx+rwidth)]
            smallImgROI = smallImg[ry:(ry+6*dy),rx:(rx+rwidth)]  # speed up by setting instead of extracting ROI
            nestedObjects = nestedCascade.detectMultiScale( smallImgROI,
                1.1, 2, 0
                #|CV_HAAR_FIND_BIGGEST_OBJECT
                #|CV_HAAR_DO_ROUGH_SEARCH
                #|CV_HAAR_DO_CANNY_PRUNING
                |cv.CV_HAAR_SCALE_IMAGE,
                (30, 30) )
            nestedObjects = self._util_merge_Regions(list(nestedObjects), overlap=True)[0]
            # fewer than two eyes found: retry with the per-eye cascades
            if len(nestedObjects) < 2:
                nestedLeftEye = cascadelefteye.detectMultiScale( smallImgROI,
                    1.1, 2, 0
                    #|CV_HAAR_FIND_BIGGEST_OBJECT
                    #|CV_HAAR_DO_ROUGH_SEARCH
                    #|CV_HAAR_DO_CANNY_PRUNING
                    |cv.CV_HAAR_SCALE_IMAGE,
                    (30, 30) )
                nestedRightEye = cascaderighteye.detectMultiScale( smallImgROI,
                    1.1, 2, 0
                    #|CV_HAAR_FIND_BIGGEST_OBJECT
                    #|CV_HAAR_DO_ROUGH_SEARCH
                    #|CV_HAAR_DO_CANNY_PRUNING
                    |cv.CV_HAAR_SCALE_IMAGE,
                    (30, 30) )
                nestedObjects = self._util_merge_Regions(list(nestedObjects) +
                                                         list(nestedLeftEye) +
                                                         list(nestedRightEye), overlap=True)[0]
            #if len(nestedObjects) > 2:
            #    nestedObjects = self._util_merge_Regions(list(nestedObjects), close=True)[0]
            smallImgROI = smallImg[(ry+4*dy):(ry+rheight),rx:(rx+rwidth)]
            nestedMouth = cascademouth.detectMultiScale( smallImgROI,
                1.1, 2, 0
                |cv.CV_HAAR_FIND_BIGGEST_OBJECT
                |cv.CV_HAAR_DO_ROUGH_SEARCH
                #|CV_HAAR_DO_CANNY_PRUNING
                |cv.CV_HAAR_SCALE_IMAGE,
                (30, 30) )
            smallImgROI = smallImg[(ry+(5*dy)/2):(ry+5*dy+(5*dy)/2),(rx+(5*dx)/2):(rx+5*dx+(5*dx)/2)]
            nestedNose = cascadenose.detectMultiScale( smallImgROI,
                1.1, 2, 0
                |cv.CV_HAAR_FIND_BIGGEST_OBJECT
                |cv.CV_HAAR_DO_ROUGH_SEARCH
                #|CV_HAAR_DO_CANNY_PRUNING
                |cv.CV_HAAR_SCALE_IMAGE,
                (30, 30) )
            smallImgROI = smallImg[(ry+2*dy):(ry+6*dy),rx:(rx+rwidth)]
            nestedEars = list(cascadeleftear.detectMultiScale( smallImgROI,
                1.1, 2, 0
                |cv.CV_HAAR_FIND_BIGGEST_OBJECT
                |cv.CV_HAAR_DO_ROUGH_SEARCH
                #|CV_HAAR_DO_CANNY_PRUNING
                |cv.CV_HAAR_SCALE_IMAGE,
                (30, 30) ))
            nestedEars += list(cascaderightear.detectMultiScale( smallImgROI,
                1.1, 2, 0
                |cv.CV_HAAR_FIND_BIGGEST_OBJECT
                |cv.CV_HAAR_DO_ROUGH_SEARCH
                #|CV_HAAR_DO_CANNY_PRUNING
                |cv.CV_HAAR_SCALE_IMAGE,
                (30, 30) ))
            # all positions below are re-scaled back to original image coords
            data = { 'ID': (i+1),
                     'Position': tuple(np.int_(r*scale)),
                     'Type': u'-',
                     'Eyes': [],
                     'Mouth': (),
                     'Nose': (),
                     'Ears': [],
                     'Pose': (), }
            data['Coverage'] = float(data['Position'][2]*data['Position'][3])/(self.image_size[0]*self.image_size[1])
            #if (c >= confidence):
            #    eyes = nestedObjects
            #    if not (type(eyes) == type(tuple())):
            #        eyes = tuple((eyes*scale).tolist())
            #    result.append( {'Position': r*scale, 'eyes': eyes, 'confidence': c} )
            #print {'Position': r, 'eyes': nestedObjects, 'confidence': c}
            for nr in nestedObjects:
                (nrx, nry, nrwidth, nrheight) = nr
                cx = cv.Round((rx + nrx + nrwidth*0.5)*scale)
                cy = cv.Round((ry + nry + nrheight*0.5)*scale)
                radius = cv.Round((nrwidth + nrheight)*0.25*scale)
                #cv2.circle( img, (cx, cy), radius, color, 3, 8, 0 )
                data['Eyes'].append( (cx-radius, cy-radius, 2*radius, 2*radius) )
            if len(nestedMouth):
                (nrx, nry, nrwidth, nrheight) = nestedMouth[0]
                cx = cv.Round((rx + nrx + nrwidth*0.5)*scale)
                cy = cv.Round(((ry+4*dy) + nry + nrheight*0.5)*scale)
                radius = cv.Round((nrwidth + nrheight)*0.25*scale)
                #cv2.circle( img, (cx, cy), radius, color, 3, 8, 0 )
                data['Mouth'] = (cx-radius, cy-radius, 2*radius, 2*radius)
            if len(nestedNose):
                (nrx, nry, nrwidth, nrheight) = nestedNose[0]
                cx = cv.Round(((rx+(5*dx)/2) + nrx + nrwidth*0.5)*scale)
                cy = cv.Round(((ry+(5*dy)/2) + nry + nrheight*0.5)*scale)
                radius = cv.Round((nrwidth + nrheight)*0.25*scale)
                #cv2.circle( img, (cx, cy), radius, color, 3, 8, 0 )
                data['Nose'] = (cx-radius, cy-radius, 2*radius, 2*radius)
            for nr in nestedEars:
                (nrx, nry, nrwidth, nrheight) = nr
                cx = cv.Round((rx + nrx + nrwidth*0.5)*scale)
                cy = cv.Round((ry + nry + nrheight*0.5)*scale)
                radius = cv.Round((nrwidth + nrheight)*0.25*scale)
                #cv2.circle( img, (cx, cy), radius, color, 3, 8, 0 )
                data['Ears'].append( (cx-radius, cy-radius, 2*radius, 2*radius) )
            # pose estimation needs the full landmark set: 2 eyes, mouth, nose
            if data['Mouth'] and data['Nose'] and data['Eyes'] and (len(data['Eyes']) == 2):
                # head model "little girl" for use in "MeshLab":
                # http://www.turbosquid.com/FullPreview/Index.cfm/ID/302581
                # http://meshlab.sourceforge.net/
                D3points = [[ 70.0602, 109.898, 20.8234],    # left eye
                            [ 2.37427, 110.322, 21.7776],    # right eye
                            [ 36.8301, 78.3185, 52.0345],    # nose
                            [ 36.6391, 51.1675, 38.5903],]   # mouth
                            #[ 119.268, 91.3111, -69.6397],  # left ear
                            #[-49.1328, 91.3111, -67.2481],] # right ear
                D2points = [np.array(data['Eyes'][0]), np.array(data['Eyes'][1]),
                            np.array(data['Nose']), np.array(data['Mouth']),]
                # use the landmark box centres as the 2D correspondences
                D2points = [ item[:2] + item[2:]/2. for item in D2points ]
                neutral = np.array([[np.pi],[0.],[0.]])
                # calculate pose
                rvec, tvec, cm, err = self._util_get_Pose_solvePnP(D3points, D2points, self.image_size)
                #data['Pose'] = tuple(rvec[:,0])
                # reject the pose when any reprojection residual is too big
                check = not (err[:,0,:].max() > 0.5)
                if not check:
                    rvec = neutral # reset to neutral pose
                    tvec = np.array([[0.],[0.],[100.]]) # reset to neutral position (same order as max of D3points)
                    pywikibot.warning(u'Could not calculate pose of face, too big errors. '
                                      u'(looks like neutral pose/position is somehow singular)')
                ## debug: draw pose
                ##rvec *= 0
                #mat, perp = self._util_getD2coords_calc(np.eye(3), cm, rvec, tvec, hacky=False)
                ## from '_util_drawAxes(...)'
                #for i, item in enumerate(mat.transpose()):
                #    p = tuple((50+10*item).astype(int))[:2]
                #    cv2.line(img, (50, 50), p, (0., 0., 255.), 1)
                #    cv2.putText(img, str(i), p, cv2.FONT_HERSHEY_PLAIN, 1., (0., 0., 255.))
                #cv2.imshow("win", img)
                #cv2.waitKey()
                # calculate delta to neutral pose
                drv = -cv2.composeRT(-rvec, np.zeros((3,1)),
                                     neutral, np.zeros((3,1)))[0]
                rvec = cv2.Rodrigues(cv2.Rodrigues(rvec)[0])[0] # NOT unique!!!
                #nrv = cv2.composeRT(neutral, np.zeros((3,1)),
                #                    drv, np.zeros((3,1)))[0]
                #print (rvec - nrv < 1E-12) # compare
                data['Pose'] = map(float, tuple(drv[:,0]))
            # TODO: POSIT has to be tested and compared; draw both results!
            # POSIT: http://www.cfar.umd.edu/~daniel/daniel_papersfordownload/Pose25Lines.pdf
            if False:
                pywikibot.output("solvePnP:")
                pywikibot.output(str(rvec[:,0]))
                pywikibot.output(str(tvec[:,0]))
                pywikibot.output(str(err[:,0,:]))
                rvec, tvec, cm, err = self._util_get_Pose_POSIT(D3points, D2points)
                pywikibot.output("POSIT:")
                pywikibot.output(str(rvec[:,0]))
                pywikibot.output(str(tvec))
                pywikibot.output(str(np.array(err)[:,0,:]/max(self.image_size)))
            result.append( data )
        ## see '_drawRect'
        #if result:
        #    #image_path_new = os.path.join(scriptdir, 'cache/0_DETECTED_' + self.image_filename)
        #    image_path_new = self.image_path_JPEG.replace(u"cache/", u"cache/0_DETECTED_")
        #    cv2.imwrite( image_path_new, img )
        #return faces.tolist()
        self._features['Faces'] += result
        return
def _util_get_Pose_solvePnP(self, D3points, D2points, shape):
""" Calculate pose from head model "little girl" w/o camera or other
calibrations needed.
D2points: left eye, right eye, nose, mouth
"""
# howto (credits to "Roy"):
# http://www.youtube.com/watch?v=ZDNH4BT5Do4
# http://www.morethantechnical.com/2010/03/19/quick-and-easy-head-pose-estimation-with-opencv-w-code/
# http://www.morethantechnical.com/2012/10/17/head-pose-estimation-with-opencv-opengl-revisited-w-code/
# e.g. with head model "little girl" for use in "MeshLab":
# http://www.turbosquid.com/FullPreview/Index.cfm/ID/302581
# http://meshlab.sourceforge.net/
# set-up camera matrix (no calibration needed!)
max_d = max(shape)
cameraMatrix = [[max_d, 0, shape[0]/2.0],
[ 0, max_d, shape[1]/2.0],
[ 0, 0, 1.0],]
# calculate pose
rvec, tvec = cv2.solvePnP(np.array(D3points).astype('float32'), np.array(D2points).astype('float32'), np.array(cameraMatrix).astype('float32'), None)
# compare to 2D points
err = []
for i, vec in enumerate(np.array(D3points)):
nvec = np.dot(cameraMatrix, (np.dot(cv2.Rodrigues(rvec)[0], vec) + tvec[:,0]))
err.append(((D2points[i] - nvec[:2]/nvec[2]), D2points[i], nvec[:2]/nvec[2]))
pywikibot.output(u'result for UN-calibrated camera:\n rot=%s' % rvec.transpose()[0])
return rvec, tvec, np.array(cameraMatrix), (np.array(err)/max_d)
    #def _util_get_Pose_POSIT(self, D3points, D2points, shape):
    def _util_get_Pose_POSIT(self, D3points, D2points):
        """ Calculate pose from head model "little girl" w/o camera or other
            calibrations needed.
            Method similar to '_util_get_Pose_solvePnP', please compare.
            D2points: left eye, right eye, nose, mouth

            Uses the external 'opencv.posit' extension module instead of
            cv2.solvePnP. Returns (rvec, tvec, cameraMatrix, err); err here
            is NOT normalized by the image size (divisor 1.0).
        """
        # calculate pose
        import opencv
        #opencv.unit_test()
        (rmat, tvec, mdl) = opencv.posit(D3points, D2points, (100, 1.0e-4))
        # convert rotation matrix to a rotation vector
        rvec = cv2.Rodrigues(rmat)[0]
        # Project the model points with the estimated pose
        # http://opencv.willowgarage.com/documentation/cpp/camera_calibration_and_3d_reconstruction.html
        # intrinsic: camera matrix
        # extrinsic: rotation-translation matrix [R|t]
        # CV_32F, principal point in the centre of the image is (0, 0) instead of (self.image_size[0]*0.5)
        FOCAL_LENGTH = 760.0 # hard-coded in posit_python.cpp, should be changed...
        cameraMatrix = [[FOCAL_LENGTH, 0.0, 0.0],#shape[0]*0.0],
                        [ 0.0, FOCAL_LENGTH, 0.0],#shape[1]*0.0],
                        [ 0.0, 0.0, 1.0],]
        # compare to 2D points
        err = []
        for i, vec in enumerate(np.array(mdl)):
            nvec = np.dot(cameraMatrix, (np.dot(rmat, vec) + tvec))
            err.append(((D2points[i] - nvec[:2]/nvec[2]), D2points[i], nvec[:2]/nvec[2]))
        #pywikibot.output(u'result for UN-calibrated camera:\n rot=%s' % rvec.transpose()[0])
        return rvec, tvec, np.array(cameraMatrix), (np.array(err)/1.0)
    # https://pypi.python.org/pypi/xbob.flandmark
    # http://cmp.felk.cvut.cz/~uricamic/flandmark/
    def _detect_FaceLandmark_xBOB(self):
        """Prints the locations of any face landmark(s) found, respective
        converts them to usual face position data.

        Uses the xbob.flandmark localizer on a single image frame; the
        landmark points are converted to eye/mouth/nose boxes compatible
        with '_detect_Faces' and appended to self._features['Faces'].
        """
        scale = 1.
        try:
            #video = bob.io.VideoReader(self.image_path_JPEG.encode('utf-8'))
            video = [cv2.imread( self.image_path_JPEG, cv.CV_LOAD_IMAGE_COLOR )]
            #if img == None:
            #    raise IOError
            # !!! the 'scale' here IS RELEVANT FOR THE DETECTION RATE;
            # how small and how many features are detected as faces (or eyes)
            scale = max([1., np.average(np.array(video[0].shape)[0:2]/750.)])
        except IOError:
            pywikibot.warning(u'unknown file type [_detect_FaceLandmark_xBOB]')
            return
        except AttributeError:
            pywikibot.warning(u'unknown file type [_detect_FaceLandmark_xBOB]')
            return
        # downscale like in the other detectors (no grayscale conversion here)
        smallImg = np.empty( (cv.Round(video[0].shape[1]/scale), cv.Round(video[0].shape[0]/scale)), dtype=np.uint8 )
        video = [ cv2.resize( img, smallImg.shape, interpolation=cv2.INTER_LINEAR ) for img in video ]
        sys.path.append(os.path.join(scriptdir, 'dtbext'))
        import _bob as bob
        import xbob_flandmark as xbob
        localize = xbob.flandmark.Localizer()
        result = []
        for frame in video: # currently ALWAYS contains ONE (1!) entry
            # flandmark expects channel-first data; keep a channel-last copy
            # around for the (commented) debug drawing below
            frame = np.transpose(frame, (2,0,1))
            img = np.transpose(frame, (1,2,0))
            for i, flm in enumerate(localize(frame)):
                #for pi, point in enumerate(flm['landmark']):
                #    cv2.circle(img, tuple(map(int, point)), 3, ( 0, 0, 255))
                #    cv2.circle(img, tuple(map(int, point)), 5, ( 0, 255, 0))
                #    cv2.circle(img, tuple(map(int, point)), 7, (255, 0, 0))
                #    cv2.putText(img, str(pi), tuple(map(int, point)), cv2.FONT_HERSHEY_PLAIN, 1.0, (0,255,0))
                #cv2.rectangle(img, tuple(map(int, flm['bbox'][:2])), tuple(map(int, (flm['bbox'][0]+flm['bbox'][2], flm['bbox'][1]+flm['bbox'][3]))), (0, 255, 0))
                # mouth box from landmarks 3 and 4 (as (x, y, w, h))
                mat = np.array([flm['landmark'][3], flm['landmark'][4]])
                mi = np.min(mat, axis=0)
                mouth = tuple(mi.astype(int)) + tuple((np.max(mat, axis=0)-mi).astype(int))
                #cv2.rectangle(img, tuple(mi.astype(int)), tuple(np.max(mat, axis=0).astype(int)), (0, 255, 0))
                # left eye box from landmarks 5 and 1
                mat = np.array([flm['landmark'][5], flm['landmark'][1]])
                mi = np.min(mat, axis=0)
                leye = tuple(mi.astype(int)) + tuple((np.max(mat, axis=0)-mi).astype(int))
                #cv2.rectangle(img, tuple(mi.astype(int)), tuple(np.max(mat, axis=0).astype(int)), (0, 255, 0))
                # right eye box from landmarks 2 and 6
                mat = np.array([flm['landmark'][2], flm['landmark'][6]])
                mi = np.min(mat, axis=0)
                reye = tuple(mi.astype(int)) + tuple((np.max(mat, axis=0)-mi).astype(int))
                #cv2.rectangle(img, tuple(mi.astype(int)), tuple(np.max(mat, axis=0).astype(int)), (0, 255, 0))
                data = { 'ID': (i+1),
                         'Position': flm['bbox'],
                         'Type': u'Landmark',
                         'Eyes': [leye, reye],
                         'Mouth': mouth,
                         'Nose': tuple(np.array(flm['landmark'][7]).astype(int)) + (0, 0),
                         'Ears': [],
                         'Landmark': [tuple(lm) for lm in np.array(flm['landmark']).astype(int)], }
                data['Coverage'] = float(data['Position'][2]*data['Position'][3])/(self.image_size[0]*self.image_size[1])
                result.append(data)
            #img = img.astype('uint8')
            #cv2.imshow("people detector", img)
            #cv2.waitKey()
        self._features['Faces'] += result
        return
    # .../opencv/samples/cpp/peopledetect.cpp
    # + Haar/Cascade detection
    def _detect_People(self):
        """Detect people/pedestrians and store their bounding boxes under
        self._features['People'].

        Combines an HOG detector (default people detector SVM) with a
        full-body haar cascade, merges overlapping hits, shrinks the
        slightly-too-large HOG boxes and clips them to the image.
        """
        # http://stackoverflow.com/questions/10231380/graphic-recognition-of-people
        # https://code.ros.org/trac/opencv/ticket/1298
        # http://opencv.itseez.com/modules/gpu/doc/object_detection.html
        # http://opencv.willowgarage.com/documentation/cpp/basic_structures.html
        # http://www.pygtk.org/docs/pygtk/class-gdkrectangle.html
        scale = 1.
        try:
            img = cv2.imread(self.image_path_JPEG, cv.CV_LOAD_IMAGE_COLOR)
            # NOTE(review): 'img == None' relies on old numpy comparison
            # behaviour; modern numpy would compare element-wise here
            if (img == None) or (min(img.shape[:2]) < 100) or (not img.data) \
               or (self.image_size[0] is None):
                return
            # !!! the 'scale' here IS RELEVANT FOR THE DETECTION RATE;
            # how small and how many features are detected
            #scale = max([1., np.average(np.array(img.shape)[0:2]/500.)])
            scale = max([1., np.average(np.array(img.shape)[0:2]/400.)])
            #scale = max([1., np.average(np.array(img.shape)[0:2]/300.)])
        except IOError:
            pywikibot.warning(u'unknown file type [_detect_People]')
            return
        except AttributeError:
            pywikibot.warning(u'unknown file type [_detect_People]')
            return
        # similar to face detection
        smallImg = np.empty( (cv.Round(img.shape[1]/scale), cv.Round(img.shape[0]/scale)), dtype=np.uint8 )
        #gray = cv2.cvtColor( img, cv.CV_BGR2GRAY )
        gray = img
        smallImg = cv2.resize( gray, smallImg.shape, interpolation=cv2.INTER_LINEAR )
        #smallImg = cv2.equalizeHist( smallImg )
        img = smallImg
        hog = cv2.HOGDescriptor()
        hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
        #cv2.namedWindow("people detector", 1)
        found = found_filtered = []
        #t = time.time()
        # run the detector with default parameters. to get a higher hit-rate
        # (and more false alarms, respectively), decrease the hitThreshold and
        # groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
        # detectMultiScale(img, hit_threshold=0, win_stride=Size(),
        #                  padding=Size(), scale0=1.05, group_threshold=2)
        enable_recovery()   # enable recovery from hard crash
        ret = hog.detectMultiScale(img, 0.25, (8,8), (32,32), 1.05, 2)
        disable_recovery()  # disable since everything worked out fine
        # old TS cv2 build returns the hits directly; newer cv2 returns
        # (hits, weights)
        if cv2.__version__ == '$Rev: 4557 $': # TS
            found = ret
        else: #'2.4.5' or else (e.g. on fedora 18)
            found, w = ret
        found = list(found)
        # people haar/cascaded classifier
        # use 'haarcascade_fullbody.xml', ... also (like face detection)
        xml = os.path.join(scriptdir, 'externals/opencv/haarcascades/haarcascade_fullbody.xml')
        #xml = os.path.join(scriptdir, 'externals/opencv/haarcascades/haarcascade_lowerbody.xml')
        #xml = os.path.join(scriptdir, 'externals/opencv/haarcascades/haarcascade_upperbody.xml')
        if not os.path.exists(xml):
            raise IOError(u"No such file: '%s'" % xml)
        cascade = cv2.CascadeClassifier(xml)
        objects = list(cascade.detectMultiScale( smallImg,
            1.1, 3, 0
            #|cv.CV_HAAR_FIND_BIGGEST_OBJECT
            #|cv.CV_HAAR_DO_ROUGH_SEARCH
            |cv.CV_HAAR_SCALE_IMAGE,
            (30, 30) ))
        found += objects
        #t = time.time() - t
        #print("tdetection time = %gms\n", t*1000.)
        bbox = gtk.gdk.Rectangle(*(0,0,img.shape[1],img.shape[0]))
        # exclude duplicates (see also in 'classifyFeatures()')
        found_filtered = [gtk.gdk.Rectangle(*f) for f in self._util_merge_Regions(found, sub=True)[0]]
        result = []
        for i in range(len(found_filtered)):
            r = found_filtered[i]
            # the HOG detector returns slightly larger rectangles than the real objects.
            # so we slightly shrink the rectangles to get a nicer output.
            r.x += cv.Round(r.width*0.1)
            r.width = cv.Round(r.width*0.8)
            r.y += cv.Round(r.height*0.07)
            r.height = cv.Round(r.height*0.8)
            data = { 'ID': (i+1), }
                     #'Center': (int(r.x + r.width*0.5), int(r.y + r.height*0.5)), }
            # crop to image size (because of the slightly bigger boxes)
            r = bbox.intersect(r)
            #cv2.rectangle(img, (r.x, r.y), (r.x+r.width, r.y+r.height), cv.Scalar(0,255,0), 3)
            data['Position'] = tuple(np.int_(np.array(r)*scale))
            data['Coverage'] = float(data['Position'][2]*data['Position'][3])/(self.image_size[0]*self.image_size[1])
            result.append( data )
        #cv2.imshow("people detector", img)
        #c = cv2.waitKey(0) & 255
        self._features['People'] = result
        return
def _detect_Geometry(self):
result = self._util_get_Geometry_CVnSCIPY()
self._features['Geometry'] = [{'Lines': result['Lines'],
'Circles': result['Circles'],
'Corners': result['Corners'],}]
return
    # https://code.ros.org/trac/opencv/browser/trunk/opencv/samples/python/houghlines.py?rev=2770
    def _util_get_Geometry_CVnSCIPY(self):
        """Analyse the image geometry and return a dict with 'Lines',
        'Circles', 'Edge_Ratio', 'Corners' and 'FFT_Peaks'.

        The result is computed once and cached in self._buffer_Geometry;
        entries that could not be computed stay at the placeholder '-'.
        """
        # http://docs.opencv.org/modules/imgproc/doc/feature_detection.html#cornerharris
        # http://docs.opencv.org/modules/imgproc/doc/feature_detection.html#houghcircles
        # http://docs.opencv.org/modules/imgproc/doc/feature_detection.html#houghlines
        # http://docs.opencv.org/modules/imgproc/doc/feature_detection.html#houghlinesp
        if hasattr(self, '_buffer_Geometry'):
            return self._buffer_Geometry
        self._buffer_Geometry = {'Lines': '-', 'Circles': '-', 'Edge_Ratio': '-', 'Corners': '-',
                                 'FFT_Peaks': '-'}
        scale = 1.
        try:
            img = cv2.imread(self.image_path_JPEG, cv.CV_LOAD_IMAGE_COLOR)
            # NOTE(review): 'img == None' relies on old numpy comparison
            # behaviour; modern numpy would compare element-wise here
            if (img == None):
                raise IOError
            # !!! the 'scale' here IS RELEVANT FOR THE DETECTION RATE;
            # how small and how many features are detected
            scale = max([1., np.average(np.array(img.shape)[0:2]/500.)])
        except IOError:
            pywikibot.warning(u'unknown file type [_detect_Geometry]')
            return self._buffer_Geometry
        except AttributeError:
            pywikibot.warning(u'unknown file type [_detect_Geometry]')
            return self._buffer_Geometry
        # similar to face or people detection
        smallImg = np.empty( (cv.Round(img.shape[1]/scale), cv.Round(img.shape[0]/scale)), dtype=np.uint8 )
        _gray = cv2.cvtColor( img, cv.CV_BGR2GRAY )
        # smooth it, otherwise a lot of false circles may be detected
        #gray = cv2.GaussianBlur( _gray, (9, 9), 2 )
        gray = cv2.GaussianBlur( _gray, (5, 5), 2 )
        smallImg = cv2.resize( gray, smallImg.shape, interpolation=cv2.INTER_LINEAR )
        #smallImg = cv2.equalizeHist( smallImg )
        src = smallImg
        # https://code.ros.org/trac/opencv/browser/trunk/opencv/samples/python/houghlines.py?rev=2770
        #dst = cv2.Canny(src, 50, 200)
        dst = cv2.Canny(src, 10, 10)
        edges = cv2.Canny(src, 10, 10)
        #color_dst = cv2.cvtColor(dst, cv.CV_GRAY2BGR)
        # edges (in this sensitve form a meassure for color gradients)
        data = {}
        data['Edge_Ratio'] = float((edges != 0).sum())/(edges.shape[0]*edges.shape[1])
        # lines
        USE_STANDARD = True
        if USE_STANDARD:
            #lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_STANDARD, 1, pi / 180, 100, 0, 0)
            #lines = cv2.HoughLines(dst, 1, math.pi / 180, 100)
            lines = cv2.HoughLines(dst, 1, math.pi / 180, 200)
            if (lines is not None) and len(lines):
                lines = lines[0]
                data['Lines'] = len(lines)
            #for (rho, theta) in lines[:100]:
            #    a = math.cos(theta)
            #    b = math.sin(theta)
            #    x0 = a * rho
            #    y0 = b * rho
            #    pt1 = (cv.Round(x0 + 1000*(-b)), cv.Round(y0 + 1000*(a)))
            #    pt2 = (cv.Round(x0 - 1000*(-b)), cv.Round(y0 - 1000*(a)))
            #    cv2.line(color_dst, pt1, pt2, cv.RGB(255, 0, 0), 3, 8)
        else:
            #lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_PROBABILISTIC, 1, pi / 180, 50, 50, 10)
            # NOTE: in this branch the line count is never written to 'data'
            lines = cv2.HoughLinesP(dst, 1, math.pi / 180, 100)
            #for line in lines:
            #    cv2.line(color_dst, line[0], line[1], cv.CV_RGB(255, 0, 0), 3, 8)
        # circles
        try:
            #circles = cv2.HoughCircles(src, cv.CV_HOUGH_GRADIENT, 2, src.shape[0]/4)#, 200, 100 )
            circles = cv2.HoughCircles(src, cv.CV_HOUGH_GRADIENT, 2, src.shape[0]/4, param2=200)
        except cv2.error:
            circles = None
        if (circles is not None) and len(circles):
            circles = circles[0]
            data['Circles'] = len(circles)
        #for c in circles:
        #    center = (cv.Round(c[0]), cv.Round(c[1]))
        #    radius = cv.Round(c[2])
        #    # draw the circle center
        #    cv2.circle( color_dst, center, 3, cv.CV_RGB(0,255,0), -1, 8, 0 )
        #    # draw the circle outline
        #    cv2.circle( color_dst, center, radius, cv.CV_RGB(0,0,255), 3, 8, 0 )
        # corners (Harris response on the edge image, then thresholded)
        corner_dst = cv2.cornerHarris( edges, 2, 3, 0.04 )
        # Normalizing
        cv2.normalize( corner_dst, corner_dst, 0, 255, cv2.NORM_MINMAX, cv.CV_32FC1 )
        #dst_norm_scaled = cv2.convertScaleAbs( corner_dst )
        # Drawing a circle around corners
        corner = []
        for j in range(corner_dst.shape[0]):
            for i in range(corner_dst.shape[1]):
                if corner_dst[j,i] > 200:
                    #circle( dst_norm_scaled, Point( i, j ), 5,  Scalar(0), 2, 8, 0 );
                    corner.append( (j,i) )
        data['Corners'] = len(corner)
        #cv2.imshow("people detector", color_dst)
        #c = cv2.waitKey(0) & 255
        # fft spectral/frequency/momentum analysis with svd peak detection
        gray = cv2.resize( _gray, smallImg.shape, interpolation=cv2.INTER_LINEAR )
        ##s = (self.image_size[1], self.image_size[0])
        #s = gray.shape
        fft = fftpack.fftn(gray)
        #fft = np.fft.fftn(gray)
        #Image.fromarray(fft.real).show()
        # shift quadrants so that low spatial frequencies are in the center
        fft = fftpack.fftshift(fft)
        #Image.fromarray(fft.real).show()
        ##Image.fromarray(fftpack.ifftn(fft).real).show()
        ##Image.fromarray(fftpack.ifftn(fftpack.ifftshift(fft)).real).show()
        ##Image.fromarray(fftpack.ifftn(fftpack.ifftshift(fft.real)).real).show()
        # (scipy svd has more options...)
        #U, S, Vh = linalg.svd(np.matrix(fft)) # scipy; unstable, crashes with C core dump
        #U, S, Vh = np.linalg.svd(np.matrix(fft)) # numpy (full matrix); unstable, ----"-----
        #U, S, Vh = np.linalg.svd(np.matrix(fft), full_matrices=False) # less memory; more stable
        S = np.linalg.svd(np.matrix(fft), compute_uv=False) # less memory, faster; more stable
        # count singular values above 1% of the largest one
        ma = 0.01*max(S)
        count = sum([int(c > ma) for c in S])
        #SS = np.zeros(s)
        #ss = min(s)
        #for i in range(0, len(S)-1, max( int(len(S)/100.), 1 )): # (len(S)==ss) -> else; problem!
        #    #SS = np.zeros(s)
        #    #SS[:(ss-i),:(ss-i)] = np.diag(S[:(ss-i)])
        #    SS[:(i+1),:(i+1)] = np.diag(S[:(i+1)])
        #    #Image.fromarray((np.dot(np.dot(U, SS), Vh) - fft).real).show()
        #    #Image.fromarray(fftpack.ifftn(fftpack.ifftshift(np.dot(np.dot(U, SS), Vh))).real - gray).show()
        #    print i, ((np.dot(np.dot(U, SS), Vh) - fft).real).max()
        #    print i, (fftpack.ifftn(fftpack.ifftshift(np.dot(np.dot(U, SS), Vh))).real - gray).max()
        #    #if ((np.dot(np.dot(U, SS), Vh) - fft).max() < (255/4.)):
        #    #    break
        #data['SVD_Comp'] = float(i)/ss
        #data['SVD_Min'] = S[:(i+1)].min()
        data['FFT_Peaks'] = float(count)/len(S)
        #pywikibot.output( u'FFT_Peaks: %s' % data['FFT_Peaks'] )
        # use wavelet transformation (FWT) from e.g. pywt, scipy signal or mlpy
        # (may be other) in addition to FFT and compare the spectra with FFT...
        # confer; "A Practical Guide to Wavelet Analysis" (http://journals.ametsoc.org/doi/pdf/10.1175/1520-0477%281998%29079%3C0061%3AAPGTWA%3E2.0.CO%3B2)
        # on how to convert and adopt FFT and wavlet spectra frequency scales
        if data:
            self._buffer_Geometry.update(data)
        return self._buffer_Geometry
    # .../opencv/samples/cpp/bagofwords_classification.cpp
    def _detectclassify_ObjectAll(self):
        """Uses the 'The Bag of Words model' for detection and classification.

        Runs the pre-trained external BoWclassify binary module against this
        image and stores one score per VOC2007 category in
        self._features['Classify'].
        """
        # CAN ALSO BE USED FOR: TEXT, ...
        # http://app-solut.com/blog/2011/07/the-bag-of-words-model-in-opencv-2-2/
        # http://app-solut.com/blog/2011/07/using-the-normal-bayes-classifier-for-image-categorization-in-opencv/
        # http://authors.library.caltech.edu/7694/
        # http://www.vision.caltech.edu/Image_Datasets/Caltech256/
        # http://opencv.itseez.com/modules/features2d/doc/object_categorization.html
        # http://www.morethantechnical.com/2011/08/25/a-simple-object-classifier-with-bag-of-words-using-opencv-2-3-w-code/
        #   source: https://github.com/royshil/FoodcamClassifier
        # http://app-solut.com/blog/2011/07/using-the-normal-bayes-classifier-for-image-categorization-in-opencv/
        #   source: http://code.google.com/p/open-cv-bow-demo/downloads/detail?name=bowdemo.tar.gz&can=2&q=
        # parts of code here should/have to be placed into e.g. a own
        # class in 'dtbext/opencv/__init__.py' script/module
        # the 20 VOC2007 categories, in the order the classifier reports them
        trained = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
                   'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
                   'horse', 'motorbike', 'person', 'pottedplant', 'sheep',
                   'sofa', 'train', 'tvmonitor',]
        bowDescPath = os.path.join(scriptdir, 'dtbext/opencv/data/bowImageDescriptors/000000.xml.gz')
        # https://code.ros.org/trac/opencv/browser/trunk/opencv/samples/cpp/bagofwords_classification.cpp?rev=3714
        # stand-alone (in shell) for training e.g. with:
        # BoWclassify /data/toolserver/pywikipedia/dtbext/opencv/VOC2007 /data/toolserver/pywikipedia/dtbext/opencv/data FAST SURF BruteForce | tee run.log
        # BoWclassify /data/toolserver/pywikipedia/dtbext/opencv/VOC2007 /data/toolserver/pywikipedia/dtbext/opencv/data HARRIS SIFT BruteForce | tee run.log
        # http://experienceopencv.blogspot.com/2011/02/object-recognition-bag-of-keypoints.html
        import opencv
        #opencv.unit_test()
        # remove any stale cached descriptor before running the classifier
        if os.path.exists(bowDescPath):
            os.remove(bowDescPath)
        # silence the chatty classifier by capturing stdout during the call
        stdout = sys.stdout
        sys.stdout = StringIO.StringIO()
        #result = opencv.BoWclassify.main(0, '', '', '', '', '')
        result = opencv.BoWclassify(6,
                                    os.path.join(scriptdir, 'dtbext/opencv/VOC2007'),
                                    os.path.join(scriptdir, 'dtbext/opencv/data'),
                                    'HARRIS',     # not important; given by training
                                    'SIFT',       # not important; given by training
                                    'BruteForce', # not important; given by training
                                    [str(os.path.abspath(self.image_path).encode('latin-1'))])
        #out = sys.stdout.getvalue()
        sys.stdout = stdout
        #print out
        if not result:
            raise ImportError("BoW did not resolve; no results found!")
        # clean up the descriptor written by this run
        os.remove(bowDescPath)
        # now make the algo working; confer also
        # http://www.xrce.xerox.com/layout/set/print/content/download/18763/134049/file/2004_010.pdf
        # http://people.csail.mit.edu/torralba/shortCourseRLOC/index.html
        self._features['Classify'] = [dict([ (trained[i], r) for i, r in enumerate(result) ])]
        return
    def _detectclassify_ObjectAll_PYWT(self):
        """Uses the 'Fast Wavelet-Based Visual Classification' for detection
        and classification.

        Experimental / incomplete: performs a 2D DWT round-trip with pywt and
        only logs the coefficients and the reconstruction error; nothing is
        stored in 'self._features' yet (see TODOs below).

        NOTE(review): 'gray' is never defined in this method, so calling it
        raises NameError — presumably a grayscale image array should be
        loaded/derived first; confirm before enabling this code path.
        """
        # Fast Wavelet-Based Visual Classification
        # http://www.cmap.polytechnique.fr/~yu/publications/ICPR08Final.pdf
        # CAN ALSO BE USED FOR: TEXT, AUDIO, (VIDEO), ...
        # TODO: for audio and video (time-based) also...!!!
        import pywt # python-pywt
        # TODO: improve (honestly; truly apply) wavelet in a meaningful and USEFUL (correct) way/manner!
        # TODO: truly apply FFT and SVD (used before)
        # wavelet transformation
        # https://github.com/nigma/pywt/tree/master/demo
        # image_blender, dwt_signal_decomposition.py, wp_scalogram.py, dwt_multidim.py, user_filter_banks.py:
        #coeffs = pywt.dwtn(gray, 'db1') # Single-level n-dimensional Discrete Wavelet Transform
        coeffs = pywt.dwt2(gray, 'db1') # 2D Discrete Wavelet Transform
        #coeffs = pywt.wavedec2(gray, 'db1') # Multilevel 2D Discrete Wavelet Transform
        pass
        result = pywt.idwt2(coeffs, 'db1') # 2D Inverse Discrete Wavelet Transform
        #result = pywt.waverec2(coeffs, 'db1') # Multilevel 2D Inverse Discrete Wavelet Transform
        # idwt2 may pad by one row/column; crop back to the original size
        result = result[:gray.shape[0],:gray.shape[1]]
        # consider 'swt' (2D Stationary Wavelet Transform) instead of 'dwt' too
        pywikibot.output(u'%s' % coeffs)
        # max absolute reconstruction error (sanity check of the round-trip)
        pywikibot.output(u'%s' % np.abs(result - gray).max())
        #data['Wavelet_Comp'] = coeffs
        # https://github.com/nigma/pywt/blob/master/demo/image_blender.py
        # http://www.ncbi.nlm.nih.gov/pubmed/18713675
        # https://github.com/nigma/pywt/blob/master/demo/wp_scalogram.py
        # https://github.com/nigma/pywt/blob/master/demo/swt2.py
        return
# a lot more paper and possible algos exist; (those with code are...)
# http://www.lix.polytechnique.fr/~schwander/python-srm/
# http://library.wolfram.com/infocenter/Demos/5725/#downloads
# http://code.google.com/p/pymeanshift/wiki/Examples
# (http://pythonvision.org/basic-tutorial, http://luispedro.org/software/mahotas, http://packages.python.org/pymorph/)
    def _detect_SegmentColors(self): # may be SLIC other other too...
        """Segment the image into color regions (JSEG) and store, per region
        covering at least 5% of the image, its average color, coverage,
        center, bounding box and distance from the image center under
        'self._features['ColorRegions']'.
        """
        try:
            #im = Image.open(self.image_path).convert(mode = 'RGB')
            im = Image.open(self.image_path_JPEG)
            ## crop 25% of the image in order to give the bot a more human eye
            ## (needed for categorization only and thus should be done there/later)
            #scale = 0.75 # crop 25% percent (area) bounding box
            #(w, h) = ( self.image_size[0]*math.sqrt(scale), self.image_size[1]*math.sqrt(scale) )
            #(l, t) = ( (self.image_size[0]-w)/2, (self.image_size[1]-h)/2 )
            #i = im.crop( (int(l), int(t), int(l+w), int(t+h)) )
            # no cropping: offsets are zero (kept so 'Center'/'Position' math
            # below still works if cropping gets re-enabled)
            (l, t) = (0, 0)
            i = im
        except IOError:
            pywikibot.warning(u'unknown file type [_detect_SegmentColors]')
            return
        result = []
        try:
            #h = i.histogram() # average over WHOLE IMAGE
            (pic, scale) = self._util_detect_ColorSegments_JSEG(i) # split image into segments first
            #(pic, scale) = self._util_detect_ColorSegments_SLIC(i) # split image into superpixel first
            hist = self._util_get_ColorSegmentsHist_PIL(i, pic, scale) #
            #pic = self._util_merge_ColorSegments(pic, hist) # iteratively in order to MERGE similar regions
            #(pic, scale_) = self._util_detect_ColorSegments_JSEG(pic) # (final split)
            ##(pic, scale) = self._util_detect_ColorSegments_JSEG(pic) # (final split)
            #hist = self._util_get_ColorSegmentsHist_PIL(i, pic, scale) #
        except TypeError:
            # the JSEG helper returns None on unreadable input, which makes the
            # tuple unpacking above raise TypeError — treated as "unknown file"
            pywikibot.warning(u'unknown file type [_detect_SegmentColors]')
            return
        i = 0
        # (may be do an additional region merge according to same color names...)
        for (h, coverage, (center, bbox)) in hist:
            if (coverage < 0.05): # at least 5% coverage needed (help for debugging/log_output)
                continue
            data = self._util_average_Color_colormath(h)
            data['Coverage'] = float(coverage)
            data['ID'] = (i+1)
            # shift by the (currently zero) crop offsets back to full-image coords
            data['Center'] = (int(center[0]+l), int(center[1]+t))
            data['Position'] = (int(bbox[0]+l), int(bbox[1]+t), int(bbox[2]), int(bbox[3]))
            # euclidean distance of the region center from the image center
            data['Delta_R'] = math.sqrt( (self.image_size[0]/2 - center[0])**2 + \
                                         (self.image_size[1]/2 - center[1])**2 )
            result.append( data )
            i += 1
        self._features['ColorRegions'] = result
        return
# http://stackoverflow.com/questions/2270874/image-color-detection-using-python
# https://gist.github.com/1246268
# colormath-1.0.8/examples/delta_e.py, colormath-1.0.8/examples/conversions.py
# http://code.google.com/p/python-colormath/
# http://en.wikipedia.org/wiki/Color_difference
# http://www.farb-tabelle.de/en/table-of-color.htm
def _detect_AverageColor(self):
try:
# we need to have 3 channels (but e.g. grayscale 'P' has only 1)
#i = Image.open(self.image_path).convert(mode = 'RGB')
i = Image.open(self.image_path_JPEG)
h = i.histogram()
except IOError:
pywikibot.warning(u'unknown file type [_detect_AverageColor]')
return
result = self._util_average_Color_colormath(h)
result['Gradient'] = self._util_get_Geometry_CVnSCIPY().get('Edge_Ratio', None) or '-'
result['FFT_Peaks'] = self._util_get_Geometry_CVnSCIPY().get('FFT_Peaks', None) or '-'
self._features['ColorAverage'] = [result]
return
# http://stackoverflow.com/questions/2270874/image-color-detection-using-python
# https://gist.github.com/1246268
# colormath-1.0.8/examples/delta_e.py, colormath-1.0.8/examples/conversions.py
# http://code.google.com/p/python-colormath/
# http://en.wikipedia.org/wiki/Color_difference
# http://www.farb-tabelle.de/en/table-of-color.htm
# http://www5.konicaminolta.eu/de/messinstrumente/color-light-language.html
def _util_average_Color_colormath(self, h):
# split into red, green, blue
r = h[0:256]
g = h[256:256*2]
b = h[256*2: 256*3]
# perform the weighted average of each channel:
# the *index* 'i' is the channel value, and the *value* 'w' is its weight
rgb = (
sum( i*w for i, w in enumerate(r) ) / max(1, sum(r)),
sum( i*w for i, w in enumerate(g) ) / max(1, sum(g)),
sum( i*w for i, w in enumerate(b) ) / max(1, sum(b))
)
# count number of colors used more than 1% of maximum
ma = 0.01*max(h)
count = sum([int(c > ma) for c in h])
# # TODO: peak detection (not supported by my local scipy version yet)
# # http://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks_cwt.html
# peakind = signal.find_peaks_cwt(fft, np.arange(1,10))
# print peaks
# print len(peakind), peakind
data = { #'histogram': h,
'RGB': rgb,
'Peaks': float(count)/len(h), }
#colors = pycolorname.RAL.colors
#colors = pycolorname.pantone.Formula_Guide_Solid
colors = pycolorname.pantone.Fashion_Home_paper
#print "=== RGB Example: RGB->LAB ==="
# Instantiate an Lab color object with the given values.
rgb = RGBColor(rgb[0], rgb[1], rgb[2], rgb_type='sRGB')
# Show a string representation.
#print rgb
# Convert RGB to LAB using a D50 illuminant.
lab = rgb.convert_to('lab', target_illuminant='D65')
#print lab
#print "=== End Example ===\n"
# Reference color.
#color1 = LabColor(lab_l=0.9, lab_a=16.3, lab_b=-2.22)
# Color to be compared to the reference.
#color2 = LabColor(lab_l=0.7, lab_a=14.2, lab_b=-1.80)
color2 = lab
res = (1.E100, '')
for c in colors:
rgb = colors[c]
rgb = RGBColor(rgb[0], rgb[1], rgb[2], rgb_type='sRGB')
color1 = rgb.convert_to('lab', target_illuminant='D65')
#print "== Delta E Colors =="
#print " COLOR1: %s" % color1
#print " COLOR2: %s" % color2
#print "== Results =="
#print " CIE2000: %.3f" % color1.delta_e(color2, mode='cie2000')
## Typically used for acceptability.
#print " CMC: %.3f (2:1)" % color1.delta_e(color2, mode='cmc', pl=2, pc=1)
## Typically used to more closely model human percetion.
#print " CMC: %.3f (1:1)" % color1.delta_e(color2, mode='cmc', pl=1, pc=1)
r = color1.delta_e(color2, mode='cmc', pl=2, pc=1)
if (r < res[0]):
res = (r, c, colors[c])
data['Color'] = res[1]
data['Delta_E'] = res[0]
data['RGBref'] = res[2]
return data
    def _util_detect_ColorSegments_JSEG(self, im):
        """Segment the image into color regions with the external JSEG tool.

        Rescales the image to roughly 200px, runs 'jseg.segdist_cpp' via
        temporary files in 'cache/', and returns (pic, scale) where 'pic' is
        the PIL label image (one gray value per region) and 'scale' the
        applied down-scale factor. Returns None on unreadable input — callers
        rely on the resulting unpack-TypeError to detect that.
        """
        tmpjpg = os.path.join(scriptdir, "cache/jseg_buf.jpg")
        tmpgif = os.path.join(scriptdir, "cache/jseg_buf.gif")
        # same scale func as in '_detect_Faces'
        scale = max([1., np.average(np.array(im.size)[0:2]/200.)])
        #print np.array(im.size)/scale, scale
        try:
            smallImg = im.resize( tuple(np.int_(np.array(im.size)/scale)), Image.ANTIALIAS )
        except IOError:
            pywikibot.warning(u'unknown file type [_util_detect_ColorSegments_JSEG]')
            return
        #im.thumbnail(size, Image.ANTIALIAS) # size is 640x480
        smallImg.convert('RGB').save(tmpjpg, "JPEG", quality=100, optimize=True)
        # Program limitation: The total number of regions in the image must be less
        # than 256 before the region merging process. This works for most images
        # smaller than 512x512.
        # Processing time will be about 10 seconds for an 192x128 image and 60 seconds
        # for a 352x240 image. It will take several minutes for a 512x512 image.
        # Minimum image size is 64x64.
        # ^^^ THUS RESCALING TO ABOUT 200px ABOVE ^^^
        # sys.stdout handled, but with freopen which could give issues
        import jseg
        # e.g. "segdist -i test3.jpg -t 6 -r9 test3.map.gif"
        enable_recovery() # enable recovery from hard crash
        jseg.segdist_cpp.main( [ item.encode('utf-8') for item in
                                 ("segdist -i %s -t 6 -r9 %s"%(tmpjpg, tmpgif)).split(" ") ] )
        disable_recovery() # disable since everything worked out fine
        #out = open((tmpgif + ".stdout"), "r").read() # reading stdout
        #print out
        # clean up segdist's redirected-stdout file and the temp JPEG input
        os.remove(tmpgif + ".stdout")
        os.remove( tmpjpg )
        # http://stackoverflow.com/questions/384759/pil-and-numpy
        pic = Image.open(tmpgif)
        #pix = np.array(pic)
        #Image.fromarray(10*pix).show()
        os.remove( tmpgif )
        return (pic, scale)
# http://planet.scipy.org/
# http://peekaboo-vision.blogspot.ch/2012/05/superpixels-for-python-pretty-slic.html
# http://ivrg.epfl.ch/supplementary_material/RK_SLICSuperpixels/index.html
def _util_detect_ColorSegments_SLIC(self, img):
import slic
im = np.array(img)
image_argb = np.dstack([im[:, :, :1], im]).copy("C")
#region_labels = slic.slic_n(image_argb, 1000, 10)
region_labels = slic.slic_n(image_argb, 1000, 50)
slic.contours(image_argb, region_labels, 10)
#import matplotlib.pyplot as plt
#plt.imshow(image_argb[:, :, 1:].copy())
#plt.show()
#pic = Image.fromarray(region_labels)
#pic.show()
#return (pic, 1.)
return (region_labels, 1.)
    def _util_get_ColorSegmentsHist_PIL(self, im, pic, scale):
        """Compute a per-region color histogram for a segmented image.

        @param im: original PIL image.
        @param pic: label image/array from a segmentation helper (PIL image
            or numpy array; one integer label per pixel).
        @param scale: down-scale factor the labels were computed at; used to
            map centers/bboxes back to original-image coordinates.
        @return: list of (histogram, coverage, (center, bbox)) per region,
            where histogram always has 3*256 entries (see fix-ups below).
        """
        # accept both PIL images and numpy arrays as label input
        if not (type(np.ndarray(None)) == type(pic)):
            pix = np.array(pic)
            #Image.fromarray(10*pix).show()
        else:
            pix = pic
            #Image.fromarray(255*pix/np.max(pix)).show()
        try:
            smallImg = im.resize( tuple(np.int_(np.array(im.size)/scale)), Image.ANTIALIAS )
        except IOError:
            pywikibot.warning(u'unknown file type [_util_get_ColorSegmentsHist_PIL]')
            return
        imgsize = float(smallImg.size[0]*smallImg.size[1])
        hist = []
        # one pass per region label (labels are assumed to be 0..max)
        for i in range(np.max(pix)+1):
            mask = np.uint8(pix == i)*255
            (y, x) = np.where(mask != 0)
            # map back to original-image coordinates via 'scale'
            center = (np.average(x)*scale, np.average(y)*scale)
            bbox = (np.min(x)*scale, np.min(y)*scale,
                    (np.max(x)-np.min(x))*scale, (np.max(y)-np.min(y))*scale)
            #coverage = np.count_nonzero(mask)/imgsize
            coverage = (mask != 0).sum()/imgsize # count_nonzero is missing in older numpy
            mask = Image.fromarray( mask )
            # histogram restricted to the region's mask
            h = smallImg.histogram(mask)
            #smallImg.show()
            #dispImg = Image.new('RGBA', smallImg.size)
            #dispImg.paste(smallImg, mask)
            #dispImg.show()
            if (len(h) == 256):
                pywikibot.output(u"gray scale image, try to fix...")
                # replicate the single gray channel into fake R, G, B channels
                h = h*3
            if (len(h) == 256*4):
                pywikibot.output(u"4-ch. image, try to fix (exclude transparency)...")
                # drop the 4th (alpha) channel histogram
                h = h[0:(256*3)]
            hist.append( (h, coverage, (center, bbox)) )
        return hist
# http://www.scipy.org/SciPyPackages/Ndimage
# http://www.pythonware.com/library/pil/handbook/imagefilter.htm
    def _util_merge_ColorSegments(self, im, hist):
        """Simplify a segmented image by painting each region with its
        average color (a preparation step for re-running JSEG so similar
        regions merge).

        @param im: label image (PIL image or numpy array of region labels).
        @param hist: per-region histogram list as produced by
            '_util_get_ColorSegmentsHist_PIL' (same region order as labels).
        @return: slightly blurred RGB PIL image of region-average colors.
        """
        # merge regions by simplifying through average color and re-running
        # JSEG again...
        # accept both PIL images and numpy arrays as label input
        if not (type(np.ndarray(None)) == type(im)):
            pix = np.array(im)
        else:
            pix = im
            im = Image.fromarray(255*pix/np.max(pix))
            im = im.convert('RGB')
        for j, (h, coverage, (center, bbox)) in enumerate(hist):
            # split into red, green, blue
            r = h[0:256]
            g = h[256:256*2]
            b = h[256*2: 256*3]
            # perform the weighted average of each channel:
            # the *index* 'i' is the channel value, and the *value* 'w' is its weight
            rgb = (
                sum( i*w for i, w in enumerate(r) ) / max(1, sum(r)),
                sum( i*w for i, w in enumerate(g) ) / max(1, sum(g)),
                sum( i*w for i, w in enumerate(b) ) / max(1, sum(b))
            )
            # color frequency analysis; do not average regions with high fluctations
            #rgb2 = (
            #    sum( i*i*w for i, w in enumerate(r) ) / max(1, sum(r)),
            #    sum( i*i*w for i, w in enumerate(g) ) / max(1, sum(g)),
            #    sum( i*i*w for i, w in enumerate(b) ) / max(1, sum(b))
            #)
            #if ( 500. < np.average( (
            #    rgb2[0] - rgb[0]**2,
            #    rgb2[1] - rgb[1]**2,
            #    rgb2[2] - rgb[2]**2, ) ) ):
            #    continue
            # paint region 'j' with its average color
            mask = np.uint8(pix == j)*255
            mask = Image.fromarray( mask )
            #dispImg = Image.new('RGB', im.size)
            #dispImg.paste(rgb, mask=mask)
            #dispImg.show()
            im.paste(rgb, mask=mask)
        # smooth region borders slightly before any re-segmentation
        pix = np.array(im)
        pix[:,:,0] = ndimage.gaussian_filter(pix[:,:,0], .5)
        pix[:,:,1] = ndimage.gaussian_filter(pix[:,:,1], .5)
        pix[:,:,2] = ndimage.gaussian_filter(pix[:,:,2], .5)
        im = Image.fromarray( pix, mode='RGB' )
        #im = im.filter(ImageFilter.BLUR) # or 'SMOOTH'
        return im
# Category:... (several; look at self.gatherFeatures for more hints)
    # Category:... (several; look at self.gatherFeatures for more hints)
    def _detect_Trained(self, info_desc, cascade_file, maxdim=500.):
        """Run a (self-)trained Haar/cascade classifier over the image.

        @param info_desc: feature key to store results under in
            'self._features'.
        @param cascade_file: xml filename below
            'externals/opencv/haarcascades/'.
        @param maxdim: approximate max dimension (px) to downscale to before
            detection; affects how small/many features are found.
        @raise IOError: if the cascade xml file does not exist.
        """
        # general (self trained) classification (e.g. people, ...)
        # http://www.computer-vision-software.com/blog/2009/11/faq-opencv-haartraining/
        # Can be used with haar classifier (use: opencv_haartraining) and
        # cascaded classifier (use: opencv_traincascade), both should work.
        # !!! train a own cascade classifier like for face detection used
        # !!! with 'opencv_haartraing' -> xml (file to use like in face/eye detection)
        # analogue to face detection:
        # http://tutorial-haartraining.googlecode.com/svn/trunk/data/haarcascades/
        # or own xml files trained onto specific file database/set
        xml = os.path.join(scriptdir, ('externals/opencv/haarcascades/' + cascade_file))
        if not os.path.exists(xml):
            raise IOError(u"No such file: '%s'" % xml)
        cascade = cv2.CascadeClassifier(xml)
        scale = 1.
        try:
            img = cv2.imread( self.image_path_JPEG, cv.CV_LOAD_IMAGE_COLOR )
            # NOTE(review): '(img == None)' relies on old cv2 returning None
            # for unreadable files; with ndarray results newer numpy makes
            # this comparison elementwise — verify against the cv2 in use
            if (img == None) or (self.image_size[0] is None):
                raise IOError
            # !!! the 'scale' here IS RELEVANT FOR THE DETECTION RATE;
            # how small and how many features are detected
            scale = max([1., np.average(np.array(img.shape)[0:2]/maxdim)])
        except IOError:
            pywikibot.warning(u'unknown file type [_detect_Trained]')
            return
        except AttributeError:
            pywikibot.warning(u'unknown file type [_detect_Trained]')
            return
        # similar to face detection: downscale, grayscale, equalize, detect
        smallImg = np.empty( (cv.Round(img.shape[1]/scale), cv.Round(img.shape[0]/scale)), dtype=np.uint8 )
        gray = cv2.cvtColor( img, cv.CV_BGR2GRAY )
        smallImg = cv2.resize( gray, smallImg.shape, interpolation=cv2.INTER_LINEAR )
        smallImg = cv2.equalizeHist( smallImg )
        objects = list(cascade.detectMultiScale( smallImg,
            1.1, 5, 0
            #|cv.CV_HAAR_FIND_BIGGEST_OBJECT
            #|cv.CV_HAAR_DO_ROUGH_SEARCH
            |cv.CV_HAAR_SCALE_IMAGE,
            (30, 30) ))
        result = []
        for i, r in enumerate(objects):
            # rescale detection rectangle back to original image coordinates
            data = { 'Position': tuple(np.int_(np.array(r)*scale)) }
            data['Coverage'] = float(data['Position'][2]*data['Position'][3])/(self.image_size[0]*self.image_size[1])
            result.append( data )
        # generic detection ...
        self._features[info_desc] = result
        return
    def _recognize_OpticalCodes(self):
        """Recognize optical codes (Data Matrix via pydmtx, barcodes/QR via
        zbar) in the image and store them under
        'self._features['OpticalCodes']' with position, data and quality.
        """
        # barcode and Data Matrix recognition (libdmtx/pydmtx, zbar, gocr?)
        # http://libdmtx.wikidot.com/libdmtx-python-wrapper
        # http://blog.globalstomp.com/2011/09/decoding-qr-code-code-128-code-39.html
        # http://zbar.sourceforge.net/
        # http://pypi.python.org/pypi/zbar
        # DataMatrix
        from pydmtx import DataMatrix # linux distro package (fedora) / TS (debian)
        ## Write a Data Matrix barcode
        #dm_write = DataMatrix()
        #dm_write.encode("Hello, world!")
        #dm_write.save("hello.png", "png")
        scale = 1.
        try:
            # Read a Data Matrix barcode
            dm_read = DataMatrix()
            img = Image.open(self.image_path_JPEG)
            #if (img == None) or (self.image_size[0] is None):
            if (self.image_size[0] is None):
                raise IOError
            # http://libdmtx.wikidot.com/libdmtx-python-wrapper
            if img.mode != 'RGB':
                img = img.convert('RGB')
            scale = max([1., np.average(np.array(img.size)/200.)])
        except IOError:
            pywikibot.warning(u'unknown file type [_recognize_OpticalCodes]')
            return
        smallImg = img.resize( (int(img.size[0]/scale), int(img.size[1]/scale)) )
        img = smallImg
        enable_recovery() # enable recovery from hard crash
        # NOTE(review): the actual decode call is commented out, so
        # 'dm_read.count()' below presumably finds nothing — confirm whether
        # this was disabled on purpose (e.g. because of hard crashes)
        #res = dm_read.decode(img.size[0], img.size[1], buffer(img.tostring()))
        disable_recovery() # disable since everything worked out fine
        #print res
        result = []
        i = -1
        for i in range(dm_read.count()):
            data, bbox = dm_read.stats(i+1)
            bbox = np.array(bbox)
            x, y = bbox[:,0], bbox[:,1]
            # (left, top, width, height) from the corner point list
            pos = (np.min(x), np.min(y), np.max(x)-np.min(x), np.max(y)-np.min(y))
            result.append({ 'ID': (i+1),
                            #'Data': dm_read.message(i+1),
                            'Data': data,
                            'Position': pos,
                            'Type': u'DataMatrix',
                            'Quality': 10, })
        self._features['OpticalCodes'] = result
        # supports many popular symbologies
        try:
            import zbar # TS (debian)
        except:
            import _zbar as zbar # other distros (fedora)
        try:
            # zbar wants an 8-bit grayscale ('Y800') buffer
            img = Image.open(self.image_path_JPEG).convert('L')
            width, height = img.size
        except IOError:
            pywikibot.warning(u'unknown file type [_recognize_OpticalCodes]')
            return
        scanner = zbar.ImageScanner()
        scanner.parse_config('enable')
        zbar_img = zbar.Image(width, height, 'Y800', img.tostring())
        # scan the image for barcodes
        # http://zbar.sourceforge.net/api/zbar_8h.html
        scanner.scan(zbar_img)
        # continue numbering after the DataMatrix results
        for symbol in zbar_img:
            i += 1
            p = np.array(symbol.location) # list of points within code region/area
            p = (min(p[:,0]), min(p[:,1]), (max(p[:,0])-min(p[:,0])), (max(p[:,1])-min(p[:,1])))
            result.append({ #'components': symbol.components,
                            'ID': (i+1),
                            #'Count': symbol.count, # 'ID'?
                            'Data': symbol.data or u'-',
                            'Position': p, # (left, top, width, height)
                            'Quality': symbol.quality, # usable for 'Confidence'
                            'Type': symbol.type, })
        # further detection ?
        self._features['OpticalCodes'] = result
        return
    def _detect_Chessboard(self):
        """Detect a 7x7 chessboard corner grid and, if found, estimate its
        3D pose.

        Stores under 'self._features['Chessboard']' a single dict with the
        detected 'Corners' and — when the full grid was found — 'Rotation',
        'Perp_Dir' and 'Perp_Dir_2D' pose information derived via solvePnP
        using a (shelve-cached) virtual camera calibration.
        """
        # Chessboard (opencv reference detector)
        # http://www.c-plusplus.de/forum/273920-full
        # http://www.youtube.com/watch?v=bV-jAnQ-tvw
        # http://nullege.com/codes/show/src%40o%40p%40opencvpython-HEAD%40samples%40chessboard.py/12/cv.FindChessboardCorners/python
        scale = 1.
        try:
            #im = cv.LoadImage(self.image_path_JPEG, cv.CV_LOAD_IMAGE_COLOR)
            im = cv2.imread( self.image_path_JPEG, cv2.CV_LOAD_IMAGE_GRAYSCALE )
            #im = cv2.imread( 'Mutilated_checkerboard_3_1.jpg', cv2.CV_LOAD_IMAGE_GRAYSCALE )
            #im = cv2.imread( 'Jogo_de_Damas_-_Acatabul.JPG', cv2.CV_LOAD_IMAGE_GRAYSCALE )
            # inner-corner grid size of a standard 8x8 chess board
            chessboard_dim = ( 7, 7 )
            # NOTE(review): 'im == None' relies on old cv2 returning None for
            # unreadable files — elementwise under newer numpy; verify
            if im == None:
                raise IOError
            scale = max([1., np.average(np.array(im.shape)[0:2]/1000.)])
            #scale = max([1., np.average(np.array(im.shape)[0:2]/500.)])
            #scale = max([1., np.average(np.array(im.shape)[0:2]/450.)])
        except IOError:
            pywikibot.warning(u'unknown file type [_detect_Chessboard]')
            return
        except AttributeError:
            pywikibot.warning(u'unknown file type [_detect_Chessboard]')
            return
        # downscale to ~1000px before corner detection
        smallImg = np.empty( (cv.Round(im.shape[1]/scale), cv.Round(im.shape[0]/scale)), dtype=np.uint8 )
        #gray = cv2.cvtColor( im, cv.CV_BGR2GRAY )
        smallImg = cv2.resize( im, smallImg.shape, interpolation=cv2.INTER_LINEAR )
        #smallImg = cv2.equalizeHist( smallImg )
        im = smallImg
        found_all = False
        corners = None
        try:
            #found_all, corners = cv.FindChessboardCorners( im, chessboard_dim )
            found_all, corners = cv2.findChessboardCorners( im, chessboard_dim )
        except cv2.error:
            # detector crash on odd input: log traceback but keep going
            pywikibot.exception(tb=True)
        #cv2.drawChessboardCorners( im, chessboard_dim, corners, found_all )
        ##cv2.imshow("win", im)
        ##cv2.waitKey()
        # store partial corner results even if the full grid was not found
        result = {}
        if corners is not None:
            result = { 'Corners': [tuple(item[0]) for item in corners], }
        self._features['Chessboard'] = [result]
        # TODO: improve chessboard detection (make it more tolerant)
        # ## http://stackoverflow.com/questions/7624765/converting-an-opencv-image-to-black-and-white
        # #im_gray = im
        # #im_gray_mat = cv.fromarray(im_gray)
        # #im_bw = cv.CreateImage(cv.GetSize(im_gray_mat), cv.IPL_DEPTH_8U, 1)
        # #im_bw_mat = cv.GetMat(im_bw)
        # #cv.Threshold(im_gray_mat, im_bw_mat, 0, 255, cv.CV_THRESH_BINARY | cv.CV_THRESH_OTSU)
        # #im = np.asarray(im_bw_mat)
        #
        # # chess board recognition (more tolerant)
        # # http://codebazaar.blogspot.ch/2011/08/chess-board-recognition-project-part-1.html
        # # https://code.ros.org/trac/opencv/browser/trunk/opencv/samples/python/houghlines.py?rev=2770
        # # http://docs.opencv.org/doc/tutorials/imgproc/imgtrans/canny_detector/canny_detector.html
        # dst = im.copy()
        # color_dst = cv2.cvtColor(dst, cv.CV_GRAY2BGR)
        # dst = cv2.GaussianBlur(dst, (3, 3), 5)
        # thr = 150
        # dst = cv2.Canny(dst, thr, 3*thr)
        # cv2.imshow("win", dst)
        # cv2.waitKey()
        # # lines to find grid
        # # http://dsp.stackexchange.com/questions/2420/alternatives-to-hough-transform-for-detecting-a-grid-like-structure
        # USE_STANDARD = True
        # if USE_STANDARD:
        #     #lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_STANDARD, 1, pi / 180, 100, 0, 0)
        #     #lines = cv2.HoughLines(dst, 1, math.pi / 180, 100)
        #     lines = cv2.HoughLines(dst, 1, math.pi / 180, 150)
        #     if (lines is not None) and len(lines):
        #         lines = lines[0]
        #         #data['Lines'] = len(lines)
        #
        #         ls = np.array(lines)
        #         import pylab
        #         (n, bins, patches) = pylab.hist(ls[:,1])
        #         print n, bins, patches
        #         pylab.grid(True)
        #         pylab.show()
        #
        #     for (rho, theta) in lines:
        #         #if theta > 0.3125: continue
        #         a = math.cos(theta)
        #         b = math.sin(theta)
        #         x0 = a * rho
        #         y0 = b * rho
        #         pt1 = (cv.Round(x0 + 1000*(-b)), cv.Round(y0 + 1000*(a)))
        #         pt2 = (cv.Round(x0 - 1000*(-b)), cv.Round(y0 - 1000*(a)))
        #         cv2.line(color_dst, pt1, pt2, cv.RGB(255, 0, 0), 3, 8)
        # else:
        #     #lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_PROBABILISTIC, 1, pi / 180, 50, 50, 10)
        #     lines = cv2.HoughLinesP(dst, 1, math.pi / 180, 100)
        #
        #     for line in lines[0]:
        #         print line
        #         cv2.line(color_dst, tuple(line[0:2]), tuple(line[2:4]), cv.CV_RGB(255, 0, 0), 3, 8)
        # cv2.imshow("win", color_dst)
        # cv2.waitKey()
        if found_all:
            # pose detection
            # http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
            # http://stackoverflow.com/questions/10022568/opencv-2-3-camera-calibration
            # virtual camera calibration is computed once and cached in a shelve
            d = shelve.open( os.path.join(scriptdir, 'externals/opencv/camera_virtual_default') )
            if ('retval' not in d):
                # http://commons.wikimedia.org/wiki/File:Mutilated_checkerboard_3.jpg
                pywikibot.output(u"Doing (virtual) camera calibration onto reference image 'File:Mutilated_checkerboard_3.jpg'")
                im3 = cv2.imread( 'Mutilated_checkerboard_3.jpg', cv2.CV_LOAD_IMAGE_GRAYSCALE )
                im3 = cv2.resize( im3, (cv.Round(im3.shape[1]/scale), cv.Round(im3.shape[0]/scale)), interpolation=cv2.INTER_LINEAR )
                # Compute the the three dimensional world-coordinates
                tmp = []
                for h in range(chessboard_dim[0]):
                    for w in range(chessboard_dim[1]):
                        tmp.append( (float(h), float(w), 0.0) )
                objectPoints = np.array(tmp)
                # Compute matrices
                _found_all, _corners = cv2.findChessboardCorners( im3, chessboard_dim, flags=cv.CV_CALIB_CB_ADAPTIVE_THRESH | cv.CV_CALIB_CB_FILTER_QUADS )
                #cv2.drawChessboardCorners( im3, chessboard_dim, _corners, _found_all )
                retval, cameraMatrix, distCoeffs, rvecs, tvecs = cv2.calibrateCamera([objectPoints.astype('float32')], [_corners.astype('float32')], im3.shape, np.eye(3), np.zeros((5, 1)))
                fovx, fovy, focalLength, principalPoint, aspectRatio = cv2.calibrationMatrixValues(cameraMatrix, im3.shape, 1.0, 1.0)
                d['objectPoints'] = [objectPoints.astype('float32')] # shape: (49, 3) in a list of 1 item
                d['imagePoints'] = [_corners.astype('float32')] # shape: (49, 1, 2) in a list of 1 item
                d['cameraMatrix'] = cameraMatrix
                d['distCoeffs'] = distCoeffs
                d['rvecs'] = rvecs
                d['tvecs'] = tvecs
                d['imageSize'] = im3.shape
                d['apertureWidth'] = 1.0
                d['apertureHeight'] = 1.0
                d['fovx'] = fovx
                d['fovy'] = fovy
                d['focalLength'] = focalLength
                d['principalPoint'] = principalPoint
                d['aspectRatio'] = aspectRatio
                d['retval'] = retval
            else:
                objectPoints = d['objectPoints'][0]
                cameraMatrix, distCoeffs = d['cameraMatrix'], d['distCoeffs']
                # would be nice to use these:
                #cameraMatrix, distCoeffs = np.eye(3), np.zeros((5,1))
                # ..,since they are simple... else other have to be documented as "used calibration" !!!
            d.close()
            # http://answers.opencv.org/question/1073/what-format-does-cv2solvepnp-use-for-points-in/
            # NOTE(review): newer cv2 returns (retval, rvec, tvec) from
            # solvePnP — this 2-tuple unpack matches only the older binding
            rvec, tvec = cv2.solvePnP(objectPoints, corners, cameraMatrix, distCoeffs)
            #rvec, tvec = cv2.solvePnP(objectPoints, corners, cameraMatrix, None)
            # http://www.opencv.org.cn/opencvdoc/2.3.2/html/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
            # -> what about using POSIT ??? (see docs on enwiki)
            # http://opencv.willowgarage.com/wiki/Posit
            #(cv2.findFundamentalMat, cv2.findHomography or from 'pose', cv2.estimateAffine3D)
            # (todo) draw the rotated 3D object (projected down to 2D)
            im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
            ## debug: axis-cross(es) - gives strange/wrong results
            #for k in range(3): # row
            #    for j in range(5): # column
            #        rmat = cv2.Rodrigues(2*3.14/5.*j*np.array(np.eye(3)[:,k]))[0]
            #        mat, perp = self._util_getD2coords_proj( np.dot(rmat, np.eye(3)), cameraMatrix, None, None, distCoeffs=distCoeffs, sign=-1 )
            #        self._util_drawAxes(mat, 50+100*j, k*100+50, im)
            ## debug: rotated axis-cross
            #mat, perp = self._util_getD2coords_proj( np.eye(3), cameraMatrix, rvec, tvec, distCoeffs=distCoeffs )
            #self._util_drawAxes(mat, 50, 350, im)
            ## debug: self-calculated rotated axis-cross - gives strange/wrong results
            #mat = np.dot((cameraMatrix), np.dot(cv2.Rodrigues(rvec)[0], np.eye(3)))
            ##mat, perp = self._util_getD2coords_proj( mat, np.eye(3), None, None, distCoeffs=distCoeffs, sign=-1 )
            #mat, perp = self._util_getD2coords_proj( mat, np.eye(3), None, None, distCoeffs=np.zeros((5,1)) )
            #self._util_drawAxes(mat, 150, 350, im)
            # debug: self-calculated rotated axis-cross - results looks good: OK
            # (and can be calculated in order to give numerical results)
            #rvec = np.zeros(3)
            rot = rvec
            # project a unit axis-cross through the estimated pose
            mat, perp = self._util_getD2coords_calc(np.eye(3), cameraMatrix, rvec, tvec)
            # normalized 2D direction of the board's z-axis (perpendicular)
            ortho = mat[:2,2]
            ortho = ortho/np.linalg.norm(ortho)
            #self._util_drawAxes(mat, 250, 350, im)
            #self._util_drawAxes(mat, 50, 50, im)
            # TODO: compare face and chessboard pose estimations and unify them, then document everything (template in wiki, ...)
            pywikibot.output(u'result for calibrated camera:\n rot=%s\n perp=%s\n perp2D=%s' % (rot.transpose()[0], perp[:,2], ortho))
            pywikibot.output(u'nice would be to do the same for uncalibrated/default cam settings')
            result.update({ 'Rotation': tuple(rot.transpose()[0]),
                            'Perp_Dir' : tuple(perp[:,2]),
                            'Perp_Dir_2D': tuple(ortho), })
            self._features['Chessboard'] = [result]
        #cv2.imshow("win", im)
        #cv2.waitKey()
        return
# def _util_getD2coords_proj(self, D3coords, cameraMatrix, rvec=None, tvec=None, distCoeffs=np.zeros((5,1)), sign=1):
# """Project 3D points down to 2D by using OpenCV functions."""
# if rvec is None:
# rvec = np.zeros((3,1))
# if tvec is None:
# tvec = np.zeros((3,1))
# mat = np.zeros((2,D3coords.shape[0]))
# matnorm = np.zeros((1,D3coords.shape[0]))
# for i in range(D3coords.shape[0]):
# D2raw, jacobian = cv2.projectPoints(np.array([[0.,0.,5.],[D3coords[0,i],D3coords[1,i],D3coords[2,i]+5.]]), rvec, tvec, cameraMatrix, distCoeffs)
# D2norm = (D2raw[1][0]-D2raw[0][0])
# #D2norm[1] *= sign # usual 2D coords <-> pixel/picture coords
# D2norm[0] *= sign # usual 2D coords <-> pixel/picture coords
# D2norm *= sign # invert all
# mat[:,i] = D2norm
# matnorm[:,i] = np.linalg.norm(D2norm)
# mat = mat/max(matnorm[0])
# return (mat, D3coords)
    def _util_getD2coords_calc(self, D3coords, cameraMatrix, rvec, tvec, hacky=True):
        """Calculate s m' = A [R|t] M' in order to project 3D points down to 2D.
        m' = (u, v, 1)^T, M' = (X, Y, Z, 1)^T, A: camera m. and [R|t]: rotation-
        translation matrix.

        @param D3coords: (3, N) array of 3D points (columns) to project.
        @param cameraMatrix: 3x3 camera matrix A (principal point is zeroed
            out below before use).
        @param rvec: Rodrigues rotation vector.
        @param tvec: translation vector (3x1).
        @param hacky: if True, normalize the projected axes by the largest
            column norm (variant used by '_detect_Chessboard'); otherwise
            rescale homogeneous coordinates by s properly.
        @return: (mat, perp) — projected 2D axes and the rotated 3D
            direction vectors (relative to the projected origin).
        @see http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
        """
        # cv2.decomposeProjectionMatrix(...)
        # drop the principal-point offset so projections are origin-relative
        cm = cameraMatrix.copy()
        cm[0:2,2] = [0., 0.]
        # build the 3x4 [R|t] matrix from the Rodrigues vector
        rmat = np.zeros((3,4))
        # http://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula
        rmat[:,0:3] = cv2.Rodrigues(rvec)[0]
        #rmat[:,0:3] = np.eye(3)
        rmat[:,3] = tvec[:,0]
        # project the world origin ...
        origin = np.dot(rmat, cv2.convertPointsToHomogeneous(np.zeros((3,3)).astype('float32')).transpose()[:,0,:])
        origin2D = np.dot((cm), origin) # np.linalg.inv(cm)
        #coords = np.dot(cv2.Rodrigues(rvec)[0], D3coords)
        # ... and the given 3D points
        coords = np.dot(rmat, cv2.convertPointsToHomogeneous(D3coords.astype('float32')).transpose()[:,0,:])
        coords2D = np.dot((cm), coords)
        # 3D directions relative to the origin (before the 2D division)
        perp = coords - origin
        if hacky:
            # for '_detect_Chessboard' but looks a bit strange ... may be wrong?!
            mat = coords2D - origin2D
            mat = mat/max([np.linalg.norm(mat[:,i]) for i in range(3)])
        else:
            for i in range(3): # rescale with s
                coords2D[:,i] /= coords2D[2,i]
                origin2D[:,i] /= origin2D[2,i]
            mat = coords2D - origin2D
        # simple'n'fast solution, if just 2D results are needed
        #mat, jacobian = cv2.projectPoints(np.append(np.zeros((1,3)),
        #                                            D3coords,
        #                                            axis=0),
        #                                  rvec, tvec, cm, np.zeros((5,1)))
        #mat = mat[:,0,:]
        #mat = (mat[1:,:] - mat[0,:]).transpose()
        return (mat, perp)
# def _util_drawAxes(self, mat, x, y, im):
# color = [(0., 0., 255.), (0., 255., 0.), (255., 0., 0.)]
# label = ['x', 'y', 'z']
# for i in range(3):
# D2norm = 40*mat[:,i]
# cv2.line(im, (x,y), (x+D2norm[0].astype(int),y+D2norm[1].astype(int)), color[i], 1)
# cv2.putText(im, label[i], (x+D2norm[0].astype(int),y+D2norm[1].astype(int)), cv2.FONT_HERSHEY_PLAIN, 1., color[i])
    def _detect_Faces_EXIF(self):
        """Extract face regions from vendor-specific EXIF maker-note tags.

        Reads the tag dict from self._util_get_DataTags_EXIF(), branches per
        camera vendor ('Make'), converts each vendor's face encoding to
        relative (x1, y1, x2, y2) boxes, rotates them back if the image was
        stored rotated, rescales to pixel coordinates and appends the result
        to self._features['Faces'].
        """
        res = self._util_get_DataTags_EXIF()
        # http://u88.n24.queensu.ca/exiftool/forum/index.php?topic=3156.0
        # http://u88.n24.queensu.ca/pub/facetest.pl
        # ( all scaling stuff ignored (!) and some strongly simplified (!) )
        # Example: 'File:Annagrah-2 041.JPG' (canon)
        if 'Make' in res:
            make = res['Make'].lower()
        else:
            make = ''
        found = set(res.keys())
        data = []
        if 'ImageWidth' in res:
            # EXIF dimensions may carry 'px'/'pt' unit suffixes; strip them
            (width, height) = (str(res['ImageWidth']), str(res['ImageHeight']))
            (width, height) = (re.sub(u'p[tx]', u'', width), re.sub(u'p[tx]', u'', height))
            try:
                (width, height) = (int(float(width)+0.5), int(float(height)+0.5))
            except ValueError:
                pywikibot.warning(u'%s contains incompatible unit(s), skipped' % ((width, height),))
                return
        else:
            (width, height) = self.image_size
        # portrait orientation implies the camera stored rotated coordinates
        wasRotated = (height > width)
        if True in [item in make for item in ['sony', 'nikon', 'panasonic', 'casio', 'ricoh']]:
            # UNTESTED: ['sony', 'nikon', 'casio', 'ricoh']
            # TESTED: ['panasonic']
            if set(['FacesDetected', 'Face1Position']).issubset(found):
                i = 1
                if 'FaceOrientation' in res:
                    pywikibot.output(res['FaceOrientation']) # for rotation 'rot'
                # 'crop' for 'casio' omitted here...
                aspect = float(height)/width
                if (aspect <= 3./4):
                    (fw, fh) = (320, 320 * aspect)
                else:
                    (fw, fh) = (240 / aspect, 240)
                #(sx, sy) = (1./width, 1./height)
                (sx, sy) = (1./fw, 1./fh)
                if 'FaceDetectFrameSize' in res:
                    (width, height) = map(int, res['FaceDetectFrameSize'].split(' '))
                    (sx, sy) = (1./width, 1./height)
                while (('Face%iPosition'%i) in res) and (i <= int(res['FacesDetected'])):
                    # position tag holds (center_x, center_y, w, h)
                    buf = map(int, res['Face%iPosition'%i].split(' '))
                    (x1, y1) = ((buf[0]-buf[2]/2)*sx, (buf[1]-buf[3]/2)*sy) # 'panasonic'
                    (x2, y2) = (x1+buf[2]*sx, y1+buf[3]*sy) #
                    #(x1, y1) = (buf[1]*sx, buf[0]*sy)
                    #(x2, y2) = (x1+buf[3]*sx, y1+buf[2]*sy)
                    data.append({ 'Position': (x1, y1, x2, y2) })
                    if ('RecognizedFace%iName'%i) in res:
                        pywikibot.output(str((res['RecognizedFace%iName'%i], res['RecognizedFace%iAge'%i])))
                    i += 1
        elif 'fujifilm' in make:
            # UNTESTED: 'fujifilm'
            if set(['FacesDetected', 'FacePositions']).issubset(found):
                # all faces packed in one flat tag: 4 ints per face
                buf = map(int, res['FacePositions'].split(' '))
                (sx, sy) = (1./width, 1./height)
                for i in range(int(res['FacesDetected'])):
                    data.append({ 'Position': [buf[i*4]*sx, buf[i*4+1]*sy,
                                               buf[i*4+2]*sx, buf[i*4+3]*sy] })
                    # NOTE(review): 'Face%iName' lookup uses the 0-based loop
                    # index; exiftool face-name tags are usually 1-based -- verify
                    if ('Face%iName'%i) in res:
                        pywikibot.output(str((res['Face%iName'%i], res['Face%iCategory'%i], res['Face%iBirthday'%i])))
        elif 'olympus' in make:
            # UNTESTED: 'olympus'
            if set(['FacesDetected', 'FaceDetectArea']).issubset(found):
                buf = map(int, res['FacesDetected'].split(' '))
                if buf[0] or buf[1]:
                    buf = map(int, res['FaceDetectArea'].split(' '))
                    # NOTE(review): 'MaxFaces' is not in the issubset() guard
                    # above -- a missing tag would raise KeyError here
                    for i in range(int(res['MaxFaces'])):
                        data.append({ 'Position': [buf[i*4], buf[i*4+1], buf[i*4+2], buf[i*4+3]] })
        elif True in [item in make for item in ['pentax', 'sanyo']]:
            # UNTESTED: ['pentax', 'sanyo']
            if set(['FacesDetected']).issubset(found):
                i = 1
                (sx, sy) = (1./width, 1./height)
                while ('Face%iPosition'%i) in res:
                    buf = map(int, res['Face%iPosition'%i].split(' ') + \
                                   res['Face%iSize'%i].split(' '))
                    (x1, y1) = ((buf[0] - buf[2]/2.)*sx, (buf[1] - buf[3]/2.)*sy)
                    (x2, y2) = (x1+buf[2]*sx, y1+buf[3]*sy)
                    data.append({ 'Position': (x1, y1, x2, y2) })
                    i += 1
                if 'FacePosition' in res:
                    buf = map(int, res['FacePosition'].split(' ') + ['100', '100']) # how big is the face?
                    (x1, y1) = (buf[0]*sx, buf[1]*sy)
                    (x2, y2) = (buf[2]*sx, buf[3]*sy)
                    data.append({ 'Position': (x1, y1, x2, y2) })
        elif 'canon' in make:
            if set(['FacesDetected', 'FaceDetectFrameSize']).issubset(found) \
               and (int(res['FacesDetected'])):
                # TESTED: older models store face detect information
                (width, height) = map(int, res['FaceDetectFrameSize'].split(' ')) # default: (320,240)
                (sx, sy) = (1./width, 1./height)
                fw = res['FaceWidth'] or 35
                i = 1
                while ('Face%iPosition'%i) in res:
                    # canon stores positions relative to the frame center
                    buf = map(int, res['Face%iPosition'%i].split(' '))
                    (x1, y1) = ((buf[0] + width/2. - fw)*sx, (buf[1] + height/2. - fw)*sy)
                    (x2, y2) = (x1 + fw*2*sx, y1 + fw*2*sy)
                    data.append({ 'Position': (x1, y1, x2, y2) })
                    i += 1
            elif set(['ValidAFPoints', 'AFImageWidth', 'AFImageHeight',
                      'AFAreaXPositions', 'AFAreaYPositions', 'PrimaryAFPoint']).issubset(found):
                # TESTED: newer models use AF points
                (width, height) = (int(res['AFImageWidth']), int(res['AFImageHeight']))
                if ('AFAreaMode' in res) and ('Face' in res['AFAreaMode']):
                    buf_x = res['AFAreaXPositions'].split(' ')
                    buf_y = res['AFAreaYPositions'].split(' ')
                    buf_w = buf_h = [100] * len(buf_x) # how big is the face? (else)
                    if 'AFAreaWidths' in res:
                        buf_w = map(int, res['AFAreaWidths'].split(' '))
                        buf_h = map(int, res['AFAreaHeights'].split(' '))
                    elif 'AFAreaWidth' in res:
                        buf_w = [int(res['AFAreaWidth'])] * len(buf_x)
                        buf_h = [int(res['AFAreaHeight'])] * len(buf_x)
                    else:
                        pywikibot.output(u'No AF area size')
                    # conversion to positive coordinates
                    buf_x = [ int(x) + width/2. for x in buf_x ]
                    buf_y = [ int(y) + height/2. for y in buf_y ]
                    # EOS models have Y flipped
                    if ('Model' in res) and ('EOS' in res['Model']):
                        buf_y = [ height - y for y in buf_y ]
                    (sx, sy) = (1./width, 1./height)
                    for i in range(int(res['ValidAFPoints'])):
                        (x1, y1) = ((buf_x[i]-buf_w[i]/2)*sx, (buf_y[i]-buf_h[i]/2)*sy)
                        (x2, y2) = (x1+buf_w[i]*sx, y1+buf_h[i]*sy)
                        data.append({ 'Position': (x1, y1, x2, y2) })
        else:
            # not supported (yet...)
            available = [item in res for item in ['FacesDetected', 'ValidAFPoints']]
            unknown = ['face' in item.lower() for item in res.keys()]
            if make and (True in (available+unknown)):
                pywikibot.warning(u"skipped '%s' since not supported (yet) [_detect_Faces_EXIF]" % make)
                pywikibot.warning(u"FacesDetected: %s - ValidAFPoints: %s" % tuple(available))
        # finally, rotate face coordinates if image was rotated
        if wasRotated:
            rot = 270
        # variable rotation omitted here... ($$faceInfo{Rotation})
        for i, d in enumerate(data):
            # rotate face coordinates
            p = data[i]['Position']
            if wasRotated:
                if (rot == 90):
                    p = (p[1], 1-p[0], p[3], 1-p[2])
                else:
                    p = (1-p[1], p[0], 1-p[3], p[2])
                if 'Rotation' in data[i]:
                    data[i]['Rotation'] -= rot
                    data[i]['Rotation'] += 360 if data[i]['Rotation'] < 0 else 0
            # rescale relative sizes to real pixel values
            p = (p[0]*self.image_size[0] + 0.5, p[1]*self.image_size[1] + 0.5,
                 p[2]*self.image_size[0] + 0.5, p[3]*self.image_size[1] + 0.5)
            # change from (x1, y1, x2, y2) to (x, y, w, h)
            #data[i]['Position'] = (p[0], p[1], p[0]-p[2], p[3]-p[1])
            data[i]['Position'] = (min(p[0],p[2]), min(p[1],p[3]),
                                   abs(p[0]-p[2]), abs(p[3]-p[1]))
            data[i] = { 'Position': tuple(map(int, data[i]['Position'])),
                        'ID': (i+1),
                        'Type': u'Exif',
                        'Eyes': [],
                        'Mouth': (),
                        'Nose': (), }
            # fraction of the whole image covered by this face box
            data[i]['Coverage'] = float(data[i]['Position'][2]*data[i]['Position'][3])/(self.image_size[0]*self.image_size[1])
        # (exclusion of duplicates is done later by '_util_merge_Regions')
        self._features['Faces'] += data
        return
def _detect_History(self):
res = self._util_get_DataTags_EXIF()
#a = []
#for k in res.keys():
# if 'history' in k.lower():
# a.append( k )
#for item in sorted(a):
# print item
# http://tilloy.net/dev/pyexiv2/api.html#pyexiv2.xmp.XmpTag
#print [getattr(res['Xmp.xmpMM.History'], item) for item in ['key', 'type', 'name', 'title', 'description', 'raw_value', 'value', ]]
result = []
i = 1
while (('Xmp.xmpMM.History[%i]' % i) in res):
data = { 'ID': i,
'Software': u'-',
'Timestamp': u'-',
'Action': u'-',
'Info': u'-', }
if ('Xmp.xmpMM.History[%i]/stEvt:softwareAgent'%i) in res:
data['Software'] = res['Xmp.xmpMM.History[%i]/stEvt:softwareAgent'%i].value
data['Timestamp'] = res['Xmp.xmpMM.History[%i]/stEvt:when'%i].value
data['Action'] = res['Xmp.xmpMM.History[%i]/stEvt:action'%i].value
if ('Xmp.xmpMM.History[%i]/stEvt:changed'%i) in res:
data['Info'] = res['Xmp.xmpMM.History[%i]/stEvt:changed'%i].value
#print res['Xmp.xmpMM.History[%i]/stEvt:instanceID'%i].value
result.append( data )
elif ('Xmp.xmpMM.History[%i]/stEvt:parameters'%i) in res:
data['Action'] = res['Xmp.xmpMM.History[%i]/stEvt:action'%i].value
data['Info'] = res['Xmp.xmpMM.History[%i]/stEvt:parameters'%i].value
#data['Action'] = data['Info'].split(' ')[0]
result.append( data )
else:
pass
i += 1
self._features['History'] = result
return
    def _util_merge_Regions(self, regs, sub=False, overlap=False, close=False):
        """Merge duplicate/overlapping rectangular regions in-place.

        @param regs: list of (x, y, w, h) rectangles; MUTATED - duplicates
                     are deleted from it
        @param sub: also drop regions (nearly) fully contained in another
        @param overlap: also drop regions whose center lies inside another
        @param close: also drop regions whose centers are very close together
        @return: tuple (regs, drop) - the pruned list and the (descending)
                 indices that were removed
        """
        # sub=False, overlap=False, close=False ; level 0 ; similar regions, similar position (default)
        # sub=True, overlap=False, close=False ; level 1 ; region contained in other, any shape/size
        # sub=False, overlap=True, close=False ; level 2 ; center of region conatained in other
        # sub=False, overlap=False, close=True ; level 3 ; regions placed close together
        if not regs:
            return ([], [])
        # image diagonal, used to normalize center distances to [0, 1]
        dmax = np.linalg.norm(self.image_size)
        #thsr = 1.0 # strict: if it is contained completely
        thsr = 0.95 # a little bit tolerant: nearly completly contained (or 0.9)
        drop = []
        for i1, r1i in enumerate(regs):
            r1 = np.float_(r1i)
            (xy1, wh1) = (r1[0:2], r1[2:4])
            c1 = xy1 + wh1/2
            a1 = wh1[0]*wh1[1]
            # check for duplicates (e.g. similar regions in similar position)
            # only compare against earlier regions (pairs checked once)
            i2 = 0
            while (i2 < i1):
                r2i, r2 = regs[i2], np.float_(regs[i2])
                (xy2, wh2) = (r2[0:2], r2[2:4])
                c2 = xy2 + wh2/2
                a2 = wh2[0]*wh2[1]
                dr = np.linalg.norm(c1-c2)/dmax
                intersect = gtk.gdk.Rectangle(*r1i).intersect(gtk.gdk.Rectangle(*r2i))
                area = intersect.width*intersect.height
                # ar1/ar2: overlap area as fraction of each region's area
                ar1, ar2 = area/a1, area/a2
                check = [(1-dr), ar1, ar2]
                # (I assume the 1. condition (1-dr) to be always true if the 2.
                # and 3. are - so it's obsolete... how is the analytic relation?)
                # add the first match (first is assumed to be the best one) / drop second one
                #print check, np.average(check), np.std(check)
                if (np.average(check) >= 0.9) and (np.std(check) <= 0.1):
                #if (np.average(check) >= 0.85) and (np.std(check) <= 0.1):
                    drop.append( i1 )
                # remove all sub-rect/-regions (all regions fully contained in other)
                if sub:
                    #drop.append( [i1, i2][check[0:2].index(1.0)] )
                    if (ar1 >= thsr) and (i2 not in drop):
                        drop.append( i1 )
                    elif (ar2 >= thsr) and (i1 not in drop):
                        drop.append( i2 )
                # from '_detect_Faces()'
                if overlap:
                    if (r2[0] <= c1[0] <= (r2[0] + r2[2])) and \
                       (r2[1] <= c1[1] <= (r2[1] + r2[3])) and (i2 not in drop):
                        drop.append( i1 )
                if close:
                    if (check[0] >= 0.985) and (i2 not in drop): # at least (!)
                        drop.append( i1 )
                i2 += 1
        # delete from the end so earlier indices stay valid
        drop = sorted(list(set(drop)))
        drop.reverse()
        for i in drop:
            del regs[i]
        return (regs, drop)
class _PngFile(_JpegFile):
    """PNG file handler; reuses the complete _JpegFile pipeline unchanged."""
    pass
class _GifFile(_JpegFile):
    """GIF file handler; reuses the complete _JpegFile pipeline unchanged."""
    pass
class _TiffFile(_JpegFile):
    """TIFF file handler; reuses the complete _JpegFile pipeline unchanged."""
    pass
class _XcfFile(_JpegFile):
    """XCF (GIMP) file handler: rasterizes to JPEG via ImageMagick first,
    then runs the inherited _JpegFile pipeline on the result."""
    def _convert(self):
        # Very few programs other than GIMP read XCF files. This is by design
        # from the GIMP developers, the format is not really documented or
        # supported as a general-purpose file format.
        # Commons uses ImageMagick, thus we have EXACTLY THE SAME support!
        # (can also be a drawback, e.g. when the library is buggy...)
        # NOTE(review): shell=True with an unquoted path - file names with
        # shell metacharacters would break/injection-risk this command
        proc = Popen("convert %s %s" % (self.image_path, self.image_path_JPEG),
                     shell=True, stderr=PIPE)#.stderr.read()
        proc.wait()
        # 127 is the shell's "command not found" status (same convention as
        # the ffprobe check in _OggFile); any other failure falls back to the
        # original file. The previous 'returncode != 0' test raised on every
        # error and made the fallback branch below unreachable dead code.
        if proc.returncode == 127:
            raise ImportError("convert (ImageMagick) not found (may be other error occured)!")
        elif proc.returncode:
            self.image_path_JPEG = self.image_path
        #data = Popen("identify -verbose info: %s" % self.image_path,
        #             shell=True, stderr=PIPE).stderr.read()
        #print data
        if not os.path.exists(self.image_path_JPEG):
            # xcf can have more than 1 layer/page like gif, tiff, and movies...
            self.image_path_JPEG = self.image_path_JPEG.replace('.jpg', '-0.jpg')
        self.image_size = Image.open(self.image_path_JPEG).size
    # MIME: 'image/x-xcf; charset=binary'
    def _detect_Properties(self):
        """Retrieve as much file property info possible, especially the same
        as commons does in order to compare if those libraries (ImageMagick,
        ...) are buggy (thus explicitely use other software for independence)"""
        result = { 'Format': u'%s' % self.file_mime[1].upper(),
                   # DO NOT use ImageMagick (identify) instead of PIL to get these info !!
                   'Length': -1, # pages/layers
                   'Dimensions': self.image_size,
                   'Filesize': os.path.getsize(self.file_name),
                   'MIME': u'%s/%s' % tuple(self.file_mime[:2]), }
        #self._properties['Properties'] = [result]
        self._properties['Properties'][0].update(result)
        return
class _SvgFile(_JpegFile):
    """SVG file handler: rasterizes via librsvg + cairo, validates with the
    W3C validator, then reuses the inherited _JpegFile pipeline."""
    def _convert(self):
        # SVG: rasterize the SVG to bitmap (MAY BE GET FROM WIKI BY DOWNLOAD?...)
        # (Mediawiki uses librsvg too: http://commons.wikimedia.org/wiki/SVG#SVGs_in_MediaWiki)
        # http://stackoverflow.com/questions/6589358/convert-svg-to-png-in-python
        # http://cairographics.org/pythoncairopil/
        # http://cairographics.org/pyrsvg/
        # http://stackoverflow.com/questions/9166400/convert-rgba-png-to-rgb-with-pil
        try:
            svg = rsvg.Handle(self.image_path)
            img = cairo.ImageSurface(cairo.FORMAT_ARGB32, svg.props.width, svg.props.height)
            ctx = cairo.Context(img)
            svg.render_cairo(ctx)
            #img.write_to_png("svg.png")
            #Image.frombuffer("RGBA",( img.get_width(),img.get_height() ),
            #                 img.get_data(),"raw","RGBA",0,1).save(self.image_path_JPEG, "JPEG")
            png = Image.frombuffer("RGBA",( img.get_width(),img.get_height() ),
                                   img.get_data(),"raw","RGBA",0,1)
            # flatten transparency onto white before saving as JPEG
            background = Image.new("RGB", png.size, (255, 255, 255))
            background.paste(png, mask=png.split()[3]) # 3 is the alpha channel
            background.save(self.image_path_JPEG, "JPEG")
            self.image_size = (svg.props.width, svg.props.height)
        # rendering failures (huge/broken SVGs) fall back to the raw file
        except MemoryError:
            self.image_path_JPEG = self.image_path
        except SystemError:
            self.image_path_JPEG = self.image_path
    # MIME: 'application/xml; charset=utf-8'
    def _detect_Properties(self):
        """Retrieve as much file property info possible, especially the same
        as commons does in order to compare if those libraries (ImageMagick,
        ...) are buggy (thus explicitely use other software for independence)"""
        result = {'Format': u'-', 'Length': -1}
        # similar to PDF page count OR use BeautifulSoup
        svgcountpages = re.compile("<page>")
        pc = len(svgcountpages.findall( file(self.image_path,"r").read() ))
        #svg = rsvg.Handle(self.image_path)
        # http://validator.w3.org/docs/api.html#libs
        # http://pypi.python.org/pypi/py_w3c/
        vld = HTMLValidator()
        valid = u'SVG'
        try:
            # remote validation; network errors just leave the plain label
            vld.validate(self.image.fileUrl())
            valid = (u'Valid SVG' if vld.result.validity == 'true' else u'Invalid SVG')
        except urllib2.URLError:
            pass
        except ValidationFault:
            pass
        #print vld.errors, vld.warnings
        #self.image_size = (svg.props.width, svg.props.height)
        result.update({ 'Format': valid,
                        'Mode': u'-',
                        'Palette': u'-',
                        'Length': pc, # pages
                        # may be set {{validSVG}} also or do something in bot template to
                        # recognize 'Format=SVG (valid)' ...
                        'Dimensions': self.image_size,
                        'Filesize': os.path.getsize(self.file_name),
                        'MIME': u'%s/%s' % tuple(self.file_mime[:2]), })
        #self._properties['Properties'] = [result]
        self._properties['Properties'][0].update(result)
        return
class _PdfFile(_JpegFile):
    """PDF file handler: counts pages, extracts embedded text/images via
    poppler tools and (optionally) OCR via ocropus."""
    def getFeatures(self):
        # optical and other text recognition (tesseract & ocropus, ...)
        self._detect_EmbeddedText()
        # self._recognize_OpticalText()
        # (may be just classify as 'contains text', may be store text, e.g. to wikisource)
        return self._features
    def _convert(self):
        # self._wikidata = self.image._latestInfo # all info wikimedia got from content (mime, sha1, ...)
        # PDF: support extract text and images
        # (Mediawiki uses ghostscript: https://www.mediawiki.org/wiki/Extension:PdfHandler#Pre-requisites)
        # http://vermeulen.ca/python-pdf.html
        # http://code.activestate.com/recipes/511465-pure-python-pdf-to-text-converter/
        # http://stackoverflow.com/questions/25665/python-module-for-converting-pdf-to-text
        # no rasterization needed for PDFs; keep this a no-op
        if os.path.splitext(self.image_filename)[1].lower() == u'.pdf':
            pass
    # MIME: 'application/pdf; charset=binary'
    def _detect_Properties(self):
        """Retrieve as much file property info possible, especially the same
        as commons does in order to compare if those libraries (ImageMagick,
        ...) are buggy (thus explicitely use other software for independence)"""
        # http://code.activestate.com/recipes/496837-count-pdf-pages/
        #rxcountpages = re.compile(r"$\s*/Type\s*/Page[/\s]", re.MULTILINE|re.DOTALL)
        rxcountpages = re.compile(r"/Type\s*/Page([^s]|$)", re.MULTILINE|re.DOTALL) # PDF v. 1.3,1.4,1.5,1.6
        pc = len(rxcountpages.findall( file(self.image_path,"rb").read() ))
        result = { 'Format': u'PDF',
                   'Mode': u'-',
                   'Palette': u'-',
                   'Length': pc, # pages
                   'Dimensions': self.image_size,
                   'Filesize': os.path.getsize(self.file_name),
                   'MIME': u'%s/%s' % tuple(self.file_mime[:2]), }
        #self._properties['Properties'] = [result]
        self._properties['Properties'][0].update(result)
        return
    # ./run-test (ocropus/ocropy)
    # (in fact all scripts/executables used here are pure python scripts!!!)
    def _recognize_OpticalText(self):
        # optical text recognition (tesseract & ocropus, ...)
        # (no full recognition but - at least - just classify as 'contains text')
        # http://www.claraocr.org/de/ocr/ocr-software/open-source-ocr.html
        # https://github.com/edsu/ocropy
        # http://de.wikipedia.org/wiki/Benutzer:DrTrigonBot/Doku#Categorization
        # Usage:tesseract imagename outputbase [-l lang] [configfile [[+|-]varfile]...]
        # tesseract imagename.tif output
        # (it's simpler to run the scripts/executables in own environment/interpreter...)
        path = os.path.join(scriptdir, 'dtbext/_ocropus/ocropy')
        curdir = os.path.abspath(os.curdir)
        # NOTE(review): chdir without try/finally - an exception below leaves
        # the process in the ocropy directory
        os.chdir(path)
        # binarization
        if os.path.exists(os.path.join(path, "temp")):
            shutil.rmtree(os.path.join(path, "temp"))
        if os.system("ocropus-nlbin %s -o %s" % (self.image_path_JPEG, os.path.join(path, "temp"))):
            raise ImportError("ocropus not found!")
        # page level segmentation
        if os.system("ocropus-gpageseg --minscale 6.0 '%s'" % os.path.join(path, "temp/????.bin.png")):
            # detection error
            return
        # raw text line recognition
        if os.system("ocropus-lattices --writebestpath '%s'" % os.path.join(path, "temp/????/??????.bin.png")):
            # detection error
            return
        # language model application
        # (optional - improve the raw results by applying a pretrained model)
        os.environ['OCROPUS_DATA'] = os.path.join(path, "models/")
        if os.system("ocropus-ngraphs '%s'" % os.path.join(path, "temp/????/??????.lattice")):
            # detection error
            return
        # create hOCR output
        if os.system("ocropus-hocr '%s' -o %s" % (os.path.join(path, "temp/????.bin.png"), os.path.join(path, "temp.html"))):
            # detection error
            return
        ## 'create HTML for debugging (use "firefox temp/index.html" to view)'
        ## (optional - generate human readable debug output)
        #if os.system("ocropus-visualize-results %s" % os.path.join(path, "temp")):
        #    # detection error
        #    return
        # "to see recognition results, type: firefox temp.html"
        # "to see details on the recognition process, type: firefox temp/index.html"
        tmpfile = open(os.path.join(path, "temp.html"), 'r')
        data = tmpfile.read()
        tmpfile.close()
        shutil.rmtree(os.path.join(path, "temp"))
        os.remove(os.path.join(path, "temp.html"))
        os.chdir(curdir)
        #print data
        pywikibot.output(data)
    def _detect_EmbeddedText(self):
        """Extract embedded text (pdftotext) and count embedded images
        (pdfimages); stores size/line/image counts in self._features['Text']."""
        # may be also: http://www.reportlab.com/software/opensource/rl-toolkit/
        # poppler pdftotext/pdfimages
        # (similar as in '_util_get_DataTags_EXIF' but with stderr and no json output)
        # http://poppler.freedesktop.org/
        # http://www.izzycode.com/bash/how-to-install-pdf2text-on-centos-fedora-redhat.html
        # MIGHT BE BETTER TO USE AS PYTHON MODULE:
        # https://launchpad.net/poppler-python/
        # http://stackoverflow.com/questions/2732178/extracting-text-from-pdf-with-poppler-c
        # http://stackoverflow.com/questions/25665/python-module-for-converting-pdf-to-text
        #proc = Popen("pdftotext -layout %s %s" % (self.image_path, self.image_path+'.txt'),
        proc = Popen("pdftotext %s %s" % (self.image_path, self.image_path+'.txt'),
                     shell=True, stderr=PIPE)#.stderr.readlines()
        proc.wait()
        if proc.returncode:
            raise ImportError("pdftotext not found!")
        data = open(self.image_path+'.txt', 'r').readlines()
        os.remove( self.image_path+'.txt' )
        # self._content_text = data
        (s1, l1) = (len(u''.join(data)), len(data))
        # NOTE(review): os.mkdir raises OSError if the directory is left over
        # from a previous (crashed) run
        tmp_path = os.path.join(os.environ.get('TMP', '/tmp'), 'DrTrigonBot/')
        os.mkdir( tmp_path )
        # switch this part off since 'pdfimages' (on toolserver) is too old; TS-1449
        # proc = Popen("pdfimages -p %s %s/" % (self.image_path, tmp_path),
        proc = Popen("pdfimages %s %s/" % (self.image_path, tmp_path),
                     shell=True, stderr=PIPE)#.stderr.readlines()
        proc.wait()
        if proc.returncode:
            raise ImportError("pdfimages not found!")
        images = os.listdir( tmp_path )
        # pages = set()
        for f in images:
            # pages.add( int(f.split('-')[1]) )
            os.remove( os.path.join(tmp_path, f) )
        os.rmdir( tmp_path )
        ## pdfminer (tools/pdf2txt.py)
        ## http://denis.papathanasiou.org/?p=343 (for layout and images)
        #debug = 0
        #laparams = layout.LAParams()
        ##
        #pdfparser.PDFDocument.debug = debug
        #pdfparser.PDFParser.debug = debug
        #cmapdb.CMapDB.debug = debug
        #pdfinterp.PDFResourceManager.debug = debug
        #pdfinterp.PDFPageInterpreter.debug = debug
        #pdfdevice.PDFDevice.debug = debug
        ##
        #rsrcmgr = pdfinterp.PDFResourceManager(caching=True)
        #outfp = StringIO.StringIO()
        #device = converter.TextConverter(rsrcmgr, outfp, codec='utf-8', laparams=laparams)
        ##device = converter.XMLConverter(rsrcmgr, outfp, codec='utf-8', laparams=laparams, outdir=None)
        ##device = converter.HTMLConverter(rsrcmgr, outfp, codec='utf-8', scale=1,
        ##                                 layoutmode='normal', laparams=laparams, outdir=None)
        ##device = pdfdevice.TagExtractor(rsrcmgr, outfp, codec='utf-8')
        #fp = file(self.image_path, 'rb')
        #try:
        #    pdfinterp.process_pdf(rsrcmgr, device, fp, set(), maxpages=0, password='',
        #                          caching=True, check_extractable=False)
        #except AssertionError:
        #    pywikibot.warning(u'pdfminer missed, may be corrupt [_detect_EmbeddedText]')
        #    return
        #except TypeError:
        #    pywikibot.warning(u'pdfminer missed, may be corrupt [_detect_EmbeddedText]')
        #    return
        #fp.close()
        #device.close()
        #data = outfp.getvalue().splitlines(True)
        #
        #(s2, l2) = (len(u''.join(data)), len(data))
        result = { 'Size': s1,
                   'Lines': l1,
                   #'Data': data,
                   #'Position': pos,
                   # 'Images': u'%s (on %s page(s))' % (len(images), len(list(pages))), # pages containing images
                   'Images': u'%s' % len(images),
                   'Type': u'-', } # 'Type' could be u'OCR' above...
        self._features['Text'] = [result]
        return
#class DjvuFile(_JpegFile):
# pass
class _OggFile(_JpegFile):
    """OGG audio/video container handler: lists streams via ffprobe and
    (optionally) extracts audio features via yaafe."""
    def getFeatures(self):
        # general handling of all audio and video formats
        self._detect_Streams() # Streams
        # general audio feature extraction
        # self._detect_AudioFeatures() # Audio
        return self._features
    # MIME: 'application/ogg; charset=binary'
    def _detect_Properties(self):
        """Retrieve as much file property info possible, especially the same
        as commons does in order to compare if those libraries (ImageMagick,
        ...) are buggy (thus explicitely use other software for independence)"""
        # 'ffprobe' (ffmpeg); audio and video streams files (ogv, oga, ...)
        d = self._util_get_DataStreams_FFMPEG()
        #print d
        #print self._util_get_DataTags_EXIF()['Duration']
        result = { 'Format': u'%s' % d['format']['format_name'].upper(),
                   'Length': float(d['format']['duration']), # secs/frames
                   'Dimensions': self.image_size,
                   'Filesize': os.path.getsize(self.file_name),
                   'MIME': u'%s/%s' % tuple(self.file_mime[:2]), }
        #self._properties['Properties'] = [result]
        self._properties['Properties'][0].update(result)
        return
    def _detect_Streams(self):
        """List all ffprobe streams (video/audio/data) with rate, dimensions
        and duration in self._features['Streams']."""
        # audio and video streams files (ogv, oga, ...)
        d = self._util_get_DataStreams_FFMPEG()
        if not d:
            return
        result = []
        for s in d['streams']:
            #print s
            # NOTE(review): if codec_type is none of video/audio/data, 'rate'
            # and 'dim' stay unbound (or stale from a prior iteration)
            if (s["codec_type"] == "video"):
                rate = s["avg_frame_rate"]
                dim = (int(s["width"]), int(s["height"]))
                #asp = s["display_aspect_ratio"]
            elif (s["codec_type"] == "audio"):
                # switch this part off since 'ffprobe' (on toolserver) is too old
                # rate = u'%s/%s/%s' % (s["channels"], s["sample_fmt"], s["sample_rate"])
                rate = u'%s/%s/%s' % (s["channels"], u'-', int(float(s["sample_rate"])))
                dim = None
            elif (s["codec_type"] == "data"):
                rate = None
                dim = None
            result.append({ 'ID': int(s["index"]) + 1,
                            'Format': u'%s/%s' % (s["codec_type"], s.get("codec_name",u'?')),
                            'Rate': rate or None,
                            'Dimensions': dim or (None, None),
                            'Duration': None if (s['duration'].lower() == 'n/a')
                                        else float(s['duration']),
                            })
        # plain images wrongly probed as a "stream" are not streams at all
        if 'image' in d["format"]["format_name"]:
            result = []
        self._features['Streams'] = result
        return
    def _util_get_DataStreams_FFMPEG(self):
        """Run ffprobe once, parse its INI-like output into
        {'streams': [...], 'format': {...}} and cache it on the instance."""
        if hasattr(self, '_buffer_FFMPEG'):
            return self._buffer_FFMPEG
        # (similar as in '_util_get_DataTags_EXIF')
        # switch this part off since 'ffprobe' (on toolserver) is too old; TS-1449
        # data = Popen("ffprobe -v quiet -print_format json -show_format -show_streams %s" % self.image_path,
        proc = Popen("ffprobe -v quiet -show_format -show_streams %s" % self.image_path,#.replace('%', '%%'),
                     shell=True, stdout=PIPE)#.stdout.read()
        proc.wait()
        # 127: the shell did not find the ffprobe executable
        if proc.returncode == 127:
            raise ImportError("ffprobe (ffmpeg) not found!")
        data = proc.stdout.read().strip()
        # self._buffer_FFMPEG = json.loads(data)
        # hand-rolled parser for the [STREAM]...[/STREAM] section format
        res, key, cur = {}, '', {}
        for item in data.splitlines():
            if (item[0] == '['):
                if not (item[1] == '/'):
                    # section opener: start collecting key=value pairs
                    key = item[1:-1]
                    cur = {}
                    if key not in res:
                        res[key] = []
                else:
                    # section closer: commit the collected dict
                    res[key].append( cur )
            else:
                val = item.split('=')
                cur[val[0].strip()] = val[1].strip()
        if res:
            # NOTE(review): assumes both STREAM and FORMAT sections exist;
            # a missing one would raise KeyError here
            res = { 'streams': res['STREAM'], 'format': res['FORMAT'][0] }
        self._buffer_FFMPEG = res
        return self._buffer_FFMPEG
    def _detect_AudioFeatures(self):
        """Extract statistical audio features (envelope/spectral/temporal
        shape, zero-crossing rate) with yaafe into self._features['Audio'].
        Currently disabled in getFeatures()."""
        # http://yaafe.sourceforge.net/manual/tools.html
        # http://yaafe.sourceforge.net/manual/quickstart.html - yaafe.py
        # ( help: yaafe.py -h / features: yaafe.py -l )
        #
        # compile yaafe on fedora:
        # 1.) get and compile 'argtable2' (2-13)
        # 1.1 download from http://argtable.sourceforge.net/
        # 1.2 unpack and cd to directory
        # 1.3 $ ccmake .
        # 1.4 set: CMAKE_BUILD_TYPE = Release
        # 1.5 press: c, g (in order to configure and generate)
        # 1.6 $ make
        # 2.) get and compile 'yaafe'
        # 1.1 download from http://yaafe.sourceforge.net/
        # 1.2 unpack and cd to directory
        # 1.3 $ ccmake .
        # 1.4 set: ARGTABLE2_INCLUDE_DIR = /home/ursin/Desktop/argtable2-13/src
        # ARGTABLE2_LIBRARY = /home/ursin/Desktop/argtable2-13/src/libargtable2.a
        # ...
        # DL_INCLUDE_DIR = /usr/include
        # DL_LIBRARY = /usr/lib64/libdl.so
        # FFTW3_INCLUDE_DIR = /usr/include
        # FFTW3_LIBRARY = /usr/lib64/libfftw3.so
        # HDF5_HL_LIBRARY = /usr/lib64/libhdf5_hl.so
        # HDF5_INCLUDE_DIR = /usr/include
        # HDF5_LIBRARY = /usr/lib64/libhdf5.so
        # LAPACK_LIBRARY = /usr/lib64/liblapack.so
        # MATLAB_ROOT = MATLAB_ROOT-NOTFOUND
        # MPG123_INCLUDE_DIR = /usr/include
        # MPG123_LIBRARY = /usr/lib64/libmpg123.so
        # RT_LIBRARY = /usr/lib64/librt.so
        # SNDFILE_INCLUDE_DIR = /usr/include
        # SNDFILE_LIBRARY = /usr/lib64/libsndfile.so
        # ...
        # WITH_FFTW3 = ON
        # WITH_HDF5 = ON
        # WITH_LAPACK = ON
        # WITH_MATLAB_MEX = OFF
        # WITH_MPG123 = ON
        # WITH_SNDFILE = ON
        # WITH_TIMERS = ON
        # (use t to toggle to more advanced options)
        # CMAKE_CXX_FLAGS = -fpermissive
        # CMAKE_C_FLAGS = -fpermissive
        # (install all needed dependencies/packages into the OS also)
        # 1.5 press: c, g (in order to configure and generate)
        # 1.6 $ make
        # 1.7 $ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/ursin/Desktop/yaafe-v0.64/src_cpp/yaafe-python/:/home/ursin/Desktop/yaafe-v0.64/src_cpp/yaafe-io/:/home/ursin/Desktop/yaafe-v0.64/src_cpp/yaafe-core/:/home/ursin/Desktop/yaafe-v0.64/src_cpp/yaafe-components/
        # $ export YAAFE_PATH=/home/ursin/Desktop/yaafe-v0.64/src_python/
        # $ export PYTHONPATH=/home/ursin/Desktop/yaafe-v0.64/src_python
        import yaafelib as yaafe
        # use WAV, OGG, MP3 (and others) audio file formats
        #audiofile = '/home/ursin/data/09Audio_UNS/Amy MacDonald - This Is The Life (2007) - Pop/01-amy_macdonald-mr_rock_and_roll.mp3'
        audiofile = self.image_path
        yaafe.setVerbose(True)
        #print 'Yaafe v%s'%yaafe.getYaafeVersion()
        # Load important components
        if (yaafe.loadComponentLibrary('yaafe-io')!=0):
            pywikibot.warning(u'cannot load yaafe-io component library !') # ! needed, else it will crash !
        # Build a DataFlow object using FeaturePlan
        fp = yaafe.FeaturePlan(sample_rate=44100, normalize=0.98, resample=False)
        #fp.addFeature('am: AmplitudeModulation blockSize=512 stepSize=256')
        #fp.addFeature('ac: AutoCorrelation blockSize=512 stepSize=256')
        #fp.addFeature('cdod: ComplexDomainOnsetDetection blockSize=512 stepSize=256')
        #fp.addFeature('erg: Energy blockSize=512 stepSize=256')
        #fp.addFeature('e: Envelope blockSize=512 stepSize=256')
        fp.addFeature('ess: EnvelopeShapeStatistics blockSize=512 stepSize=256')
        #fp.addFeature('f: Frames blockSize=512 stepSize=256')
        #fp.addFeature('lpc: LPC blockSize=512 stepSize=256')
        #fp.addFeature('lsf: LSF blockSize=512 stepSize=256')
        #fp.addFeature('l: Loudness blockSize=512 stepSize=256')
        #fp.addFeature('mfcc: MFCC blockSize=512 stepSize=256')
        ## features: AutoCorrelationPeaksIntegrator, Cepstrum, Derivate, HistogramIntegrator, SlopeIntegrator, StatisticalIntegrator
        #fp.addFeature('mfcc_d1: MFCC blockSize=512 stepSize=256 > Derivate DOrder=1')
        #fp.addFeature('mfcc_d2: MFCC blockSize=512 stepSize=256 > Derivate DOrder=2')
        #fp.addFeature('mas: MagnitudeSpectrum blockSize=512 stepSize=256')
        #fp.addFeature('mes: MelSpectrum blockSize=512 stepSize=256')
        #fp.addFeature('obsi: OBSI blockSize=512 stepSize=256')
        #fp.addFeature('obsir: OBSIR blockSize=512 stepSize=256')
        #fp.addFeature('psh: PerceptualSharpness blockSize=512 stepSize=256')
        #fp.addFeature('psp: PerceptualSpread blockSize=512 stepSize=256')
        #fp.addFeature('scfpb: SpectralCrestFactorPerBand blockSize=512 stepSize=256')
        #fp.addFeature('sd: SpectralDecrease blockSize=512 stepSize=256')
        #fp.addFeature('sfa: SpectralFlatness blockSize=512 stepSize=256')
        #fp.addFeature('sfpb: SpectralFlatnessPerBand blockSize=512 stepSize=256')
        #fp.addFeature('sfu: SpectralFlux blockSize=512 stepSize=256')
        #fp.addFeature('sr: SpectralRolloff blockSize=512 stepSize=256')
        fp.addFeature('sss: SpectralShapeStatistics blockSize=512 stepSize=256')
        #fp.addFeature('ss: SpectralSlope blockSize=512 stepSize=256')
        #fp.addFeature('sv: SpectralVariation blockSize=512 stepSize=256')
        fp.addFeature('tss: TemporalShapeStatistics blockSize=512 stepSize=256')
        fp.addFeature('zcr: ZCR blockSize=512 stepSize=256')
        df = fp.getDataFlow()
        ## or load a DataFlow from dataflow file.
        #df = DataFlow()
        #df.load(dataflow_file)
        #fp.getDataFlow().save('')
        #print df.display()
        # configure an Engine
        engine = yaafe.Engine()
        engine.load(df)
        # extract features from an audio file using AudioFileProcessor
        afp = yaafe.AudioFileProcessor()
        #afp.setOutputFormat('csv','',{}) # ! needed, else it will crash ! (but now produces file output)
        #afp.processFile(engine,audiofile)
        #feats = engine.readAllOutputs()
        ## and play with your features
        #print feats
        # extract features from an audio file and write results to csv files
        afp.setOutputFormat('csv','output',{'Precision':'8'})
        afp.processFile(engine,audiofile)
        # this creates output/myaudio.wav.mfcc.csv, .mfcc_d1.csv and .mfcc_d2.csv files.
        ## extract features from a numpy array
        #audio = np.random.randn(1,100000)
        #feats = engine.processAudio(audio)
        ## and play with your features
        #print feats
        import csv
        data = {}
        # average each feature's per-frame values into one tuple per feature
        for ext in ['ess', 'sss', 'tss', 'zcr']:
            fn = 'output' + audiofile + ('.%s.csv' % ext)
            with open(fn, 'rb') as csvfile:
                reader = csv.reader(csvfile, delimiter=',')
                d = [row for row in reader]
                d = np.array(d[5:]) # cut header and convert to numpy
                d = np.float_(d)
                d = tuple(np.average(d, axis=0))
                pywikibot.output(ext)
                #if ext in ['ess', 'sss', 'tss']:
                #    pywikibot.output(u"centroid: %s\nspread: %s\nskewness: %s\nkurtosis: %s\n" % d)
                #elif ext in ['zcr']:
                #    pywikibot.output(u"zero-crossing rate: %s\n" % d)
                data[ext.upper()] = d
                os.remove(fn)
        # remove folder too...
        self._features['Audio'] = [data]
        return
#class _MidiFile(_UnknownFile):
class _MidiFile(_OggFile):
    def _detect_HeaderAndMetadata(self):
        """Scan the raw MIDI bytes for text/copyright meta events and collect
        them (plus any music21 metadata) into self._properties['Metadata'].

        NOTE(review): bytearray.find() with str patterns is Python 2 only.
        """
        #_UnknownFile._detect_HeaderAndMetadata(self)
        #result = {'Desc': self._properties['Metadata'][0]['Desc'].splitlines()}
        # extract data from midi file
        # http://valentin.dasdeck.com/midi/midifile.htm
        # http://stackoverflow.com/questions/3943149/reading-and-interpreting-data-from-a-binary-file-in-python
        ba = bytearray(open(self.file_name, 'rb').read())
        i = -1
        res = {'Desc': []}
        # 0xFF 0x01 = text event, 0xFF 0x02 = copyright notice (MIDI meta events);
        # both are folded into the single 'Desc' key below
        for key, data in [('Text', '\x01'), ('Copyright', '\x02')]:#, ('Lyrics', '\x05')]:
            key = 'Desc'
            #res[key] = []
            # NOTE(review): 'i' is not reset between event types, so the
            # copyright scan resumes after the last text hit -- verify intended
            while True:
                i = ba.find('\xff%s' % data, i+1)
                if i < 0: # something found?
                    break
                # byte after the event type is the payload length
                e = (i+3+ba[i+2])
                if ba[e] != 0: # length match with string end (00)?
                    e = ba.find('\x00', (i+3+ba[i+2]))
                res[key].append(ba[i+3:e].decode('latin-1').strip())
            #res[key] = u'\n'.join(res[key])
        res['Desc'] = u'\n'.join(res['Desc'])
        ## find specific info in extracted data
        #print [item.strip() for item in re.findall('Generated .*?\n', result['Text'])]
        #import dateutil.parser
        #dates = []
        #for line in result['Text'].splitlines():
        #    # http://stackoverflow.com/questions/3276180/extracting-date-from-a-string-in-python
        #    try:
        #        dates.append(dateutil.parser.parse(line, fuzzy=True).isoformat(' ').decode('utf-8'))
        #    except ValueError:
        #        pass
        #print dates
        result = { 'Software': u'-',
                   'Desc': res['Desc'] if res['Desc'] else u'-',
                   'Comment': u'-',
                   'Producer': u'-',
                   'Misc': u'-', }
        # enrich with music21-parsed metadata when the file parses cleanly
        import _music21 as music21
        try:
            s = music21.converter.parse(self.file_name)
            if s.metadata:
                pywikibot.output(unicode(s.metadata))
                result.update(s.metadata)
        except music21.midi.base.MidiException:
            pass
        self._properties['Metadata'] = [result]
        return
# MIME: 'audio/midi; charset=binary'
def _detect_Properties(self):
"""Retrieve as much file property info possible, especially the same
as commons does in order to compare if those libraries (ImageMagick,
...) are buggy (thus explicitely use other software for independence)"""
# 'ffprobe' (ffmpeg); audio and video streams files (ogv, oga, ...)
d = self._util_get_DataStreams_MUSIC21()
result = { 'Format': u'%s' % self.file_mime[1].upper(),
'Length': d["duration"], # secs
'Dimensions': self.image_size,
'Filesize': os.path.getsize(self.file_name),
'MIME': u'%s/%s' % tuple(self.file_mime[:2]), }
#self._properties['Properties'] = [result]
self._properties['Properties'][0].update(result)
return
# midi audio stream/feature extraction, detect streams of notes; parts
def _detect_Streams(self):
# like in '_OggFile' (streams) a nice content listing of MIDI (music21)
d = self._util_get_DataStreams_MUSIC21()
if not d:
return
data = []
for i, part in enumerate(d["parts"]):
#print part.elements
mm = part.elements[0] # MetronomeMark
ts = part.elements[1] # TimeSignature
stream = part.notes # Stream - containing all Note(s)
#print mm.secondsPerQuarter()
#print mm.durationToSeconds(part.duration.quarterLength)
#print sum([item.seconds for item in stream]) # sum over all Note(s)
#print part.metadata
data.append( {'ID': (i+1),
'Format': u'(audio/midi)',
# note rate / noteduration ...??
'Rate': u'%s/-/-' % d["channels"][i],
'Dimension': (None, None),
'Duration': part.seconds,} )
self._features['Streams'] = data
return
def _util_get_DataStreams_MUSIC21(self):
if hasattr(self, '_buffer_MUSIC21'):
return self._buffer_MUSIC21
import _music21 as music21
#music21.features.jSymbolic.getCompletionStats()
try:
#audiofile = '/home/ursin/Desktop/3_Ships.mid'
#s = music21.midi.translate.midiFilePathToStream(self.file_name)
s = music21.converter.parse(self.file_name)
except music21.midi.base.MidiException:
pywikibot.warning(u'unknown file type [_detect_Streams]')
return
#fs = music21.features.jSymbolic.extractorsById
#for k in fs:
# for i in range(len(fs[k])):
# if fs[k][i] is not None:
# n = fs[k][i].__name__
# if fs[k][i] not in music21.features.jSymbolic.featureExtractors:
# n += " (not implemented)"
# print k, i, n
# else:
# fe = fs[k][i](s)
# print k, i, n,
# try:
# f = fe.extract()
# print f.name, f.vector
# except AttributeError:
# print "ERROR"
# TODO: do we extract "streams" and/or features here ... ???!?
# data = [{'RegisterImportance': (music21.features.jSymbolic.ImportanceOfBassRegisterFeature(s).extract().vector[0],
# music21.features.jSymbolic.ImportanceOfMiddleRegisterFeature(s).extract().vector[0],
# music21.features.jSymbolic.ImportanceOfHighRegisterFeature(s).extract().vector[0],),
# 'NoteDuration': (music21.features.jSymbolic.AverageNoteDurationFeature(s).extract().vector[0],
# music21.features.jSymbolic.MaximumNoteDurationFeature(s).extract().vector[0],),
# 'IndependentVoices': (music21.features.jSymbolic.AverageNumberOfIndependentVoicesFeature(s).extract().vector[0],
# music21.features.jSymbolic.MaximumNumberOfIndependentVoicesFeature(s).extract().vector[0],),
# 'MostCommonPitch': music21.features.jSymbolic.MostCommonPitchFeature(s).extract().vector[0],
# 'Tempo': music21.features.jSymbolic.InitialTempoFeature(s).extract().vector[0],
# #'Duration': s.highestTime,
# #'Metadata': s.metadata if s.metadata else u'',
# 'Lyrics': s.lyrics(recurse=True) if s.lyrics(recurse=True) else u'',}]
#print music21.text.assembleLyrics(s)
#print s.show('text')
#midi = [item for item in s.recurse()]
#print midi
mf = music21.midi.translate.streamToMidiFile(s)
res = {}
res["channels"] = [ len(t.getChannels()) for t in mf.tracks ]
res["parts"] = [ p for p in s.elements ]
res["duration"] = max([ p.seconds for p in s.elements ])
self._buffer_MUSIC21 = res
return self._buffer_MUSIC21
# http://commons.wikimedia.org/wiki/File_formats
# Dispatch table for 'GenericFile': maps a (MIME type, MIME subtype) tuple
# to the handler class; '*' is the fallback used for unsupported types.
_FILETYPES = { '*': _UnknownFile,
    ( 'image', 'jpeg'): _JpegFile,
    ( 'image', 'png'): _PngFile,
    ( 'image', 'gif'): _GifFile,
    ( 'image', 'tiff'): _TiffFile,
    ( 'image', 'x-xcf'): _XcfFile,
    ( 'image', 'svg+xml'): _SvgFile, # unify/merge them?
    ('application', 'xml'): _SvgFile, #
    ('application', 'pdf'): _PdfFile,
    # djvu: python-djvulibre or python-djvu for djvu support
    # http://pypi.python.org/pypi/python-djvulibre/0.3.9
    # ( 'image', 'vnd.djvu'): DjvuFile,
    ( 'audio', 'midi'): _MidiFile,
    ('application', 'ogg'): _OggFile,}
    # ( '?', '?'): _WebMFile,}
def GenericFile(file_name):
    """Factory: detect the MIME type of 'file_name' and return the matching
    file handler instance (_JpegFile, _PngFile, ..., _UnknownFile).

    MIME detection is done with libmagic; 'imghdr' is consulted as a second
    opinion for images and preferred on disagreement. A warning is issued
    when the file extension does not fit the detected MIME type.

    @param file_name: path of the media file to inspect
    @return: handler instance constructed as handler(file_name, file_mime)
    """
    # 'magic' (libmagic)
    m = magic.open(magic.MAGIC_MIME)  # or 'magic.MAGIC_NONE'
    m.load()
    # split e.g. 'image/png; charset=binary' into [type, subtype, params...]
    # (raw string: '\s' must reach the regex engine, not the str parser)
    file_mime = re.split(r'[/;\s]', m.file(file_name))
    file_imgh = ['image', imghdr.what(file_name)]  # alternative MIME ...
    if file_imgh[1] and (not (file_imgh == file_mime[:2])):
        pywikibot.warning(u'Issue in MIME type detection! Preferring imghdr result %s over libmagic %s!' % (file_imgh, file_mime))
        file_mime = file_imgh + file_mime[2:]
    mime = mimetypes.guess_all_extensions('%s/%s' % tuple(file_mime[0:2]))
    if mime and (os.path.splitext(file_name)[1].lower() not in mime):
        pywikibot.warning(u'File extension does not match MIME type! File extension should be %s.' % mime)
    # split detection and extraction according to file types; _JpegFile, ...
    # (renamed local: it used to shadow this function's own name)
    handler_class = _FILETYPES.get(tuple(file_mime[:2]), _FILETYPES['*'])
    return handler_class(file_name, file_mime)
# all classification and categorization methods and definitions - default variation
# use simplest classification I can think of (self-made) and do categorization
# mostly based on filtered/reported features
class CatImages_Default(object):
    """Default content classification and categorization.

    Uses the simplest classification imaginable (self-made, hand-tuned
    thresholds) and does the categorization mostly based on the
    filtered/reported features found in self._info / self._info_filter.
    The confidence guessing in 'classifyFeatures' should eventually be
    replaced by trained classifiers (RTrees, KNearest, Boost, SVM, MLP,
    NBayes, ...).
    """
    #ignore = []
    # topic fragments (third part of the method name) skipped by the
    # procedure listing in 'load_licenses'
    ignore = ['color']

    _thrhld_group_size = 4      # min. number of faces/people for 'Groups'
    #_thrshld_guesses = 0.1
    _thrshld_default = 0.75     # default confidence threshold

    # cascade classifier data for '_detect_Trained':
    # (category, haar cascade file[, optional parameter])
    cascade_files = [(u'Legs', 'haarcascade_lowerbody.xml'),
                     (u'Torsos', 'haarcascade_upperbody.xml'),
                     (u'Ears', 'haarcascade_mcs_leftear.xml'),
                     (u'Ears', 'haarcascade_mcs_rightear.xml'),
                     (u'Eyes', 'haarcascade_lefteye_2splits.xml'), # (http://yushiqi.cn/research/eyedetection)
                     (u'Eyes', 'haarcascade_righteye_2splits.xml'), # (http://yushiqi.cn/research/eyedetection)
                     #externals/opencv/haarcascades/haarcascade_mcs_lefteye.xml
                     #externals/opencv/haarcascades/haarcascade_mcs_righteye.xml
                     # (others include indifferent (left and/or right) and pair)
                     (u'Automobiles', 'cars3.xml'), # http://www.youtube.com/watch?v=c4LobbqeKZc
                     (u'Hands', '1256617233-2-haarcascade-hand.xml', 300.),] # http://www.andol.info/
    # ('Hands' does not behave very well, in fact it detects any kind of skin and other things...)
    #(u'Aeroplanes', 'haarcascade_aeroplane.xml'),] # e.g. for 'Category:Unidentified aircraft'

    # very simple / rough / poor-man's min. thresshold classification
    # (done by guessing, does not need to be trained)
    # replace/improve this with RTrees, KNearest, Boost, SVM, MLP, NBayes, ...
    def classifyFeatures(self):
        """Assign a 'Confidence' value in [0., 1.] to every detected feature
        in self._info (faces, color regions, people, cascade detections,
        optical codes and chessboards)."""
        # Faces and eyes (opencv pre-trained haar and extracted EXIF data)
        for i, face in enumerate(self._info['Faces']):
            if face['Type'] == u'Exif':
                # face info from EXIF gets the default confidence
                c = self._thrshld_default
            else:
                # base 0.5 plus 0.25 per detected eye (2 eyes -> 1.0)
                c = (len(face['Eyes']) + 2.) / 4.
            face['Confidence'] = c
            face['ID'] = (i + 1)
        # Segments and colors / Average color
        # coverage**(1/7) maps a coverage of 0.15 to a confidence of ~0.75
        # (other exponent/Delta_E/Delta_R weightings were tried and dropped)
        for region in self._info['ColorRegions']:
            region['Confidence'] = (region['Coverage'])**(1./7)
        # People/Pedestrian (opencv pre-trained hog and haarcascade)
        # FIX: the middle branch used to be a plain 'if' which silently
        # downgraded every detection with Coverage >= 0.20 from 0.75 to 0.5;
        # it has to be an 'elif' (descending threshold chain).
        for person in self._info['People']:
            if (person['Coverage'] >= 0.20):
                c = 0.75
            elif (person['Coverage'] >= 0.10): # at least 10% coverage needed
                c = 0.5
            else:
                c = 0.1
            person['Confidence'] = c
        # general (opencv pre-trained, third-party and self-trained haar
        # and cascade) classification; detect images with this as one of the
        # main contents only, thus high coverage requested as a minimal
        # confidence estimation; coverage**(1/5): 0.25 -> ~0.75
        for cf in self.cascade_files:
            for item in self._info[cf[0]]:
                item['Confidence'] = (item['Coverage'])**(1./5)
        # barcode and Data Matrix recognition (libdmtx/pydmtx, zbar, gocr?)
        for code in self._info['OpticalCodes']:
            code['Confidence'] = min(0.75*code['Quality']/10., 1.)
        # Chessboard (opencv reference detector); the reference board has
        # 7x7 = 49 inner corners
        for board in self._info['Chessboard']:
            board['Confidence'] = len(board['Corners'])/49.
        ## Geometric object (opencv hough line, circle, edges, corner, ...)
        #if self._info['Geometry']:
        #    self._info['Geometry'][0]['Confidence'] = 1. - self._info['Geometry'][0]['Edge_Ratio']

    # Category:Unidentified people
    def _cat_people_People(self):
        """People category based on the pedestrian (group) detection."""
        #relevance = bool(self._info_filter['People'])
        relevance = self._cat_people_Groups()[1]
        return (u'Unidentified people', relevance)

    # Category:Unidentified people
    #def _cat_multi_People(self):
    def _cat_face_People(self):
        """People category based on the face detection."""
        relevance = bool(self._info_filter['Faces'])
        #relevance = bool(self._info_filter['People']) or relevance
        return (u'Unidentified people', relevance)

    # Category:Groups
    def _cat_people_Groups(self):
        """Groups of people (pedestrian detector), excluding graphics."""
        result = self._info_filter['People']
        relevance = (len(result) >= self._thrhld_group_size) and \
                    (not self._cat_coloraverage_Graphics()[1])
        return (u'Groups', relevance)

    # Category:Groups
    def _cat_face_Groups(self):
        """Groups of people (face detector)."""
        result = self._info_filter['Faces']
        relevance = (len(result) >= self._thrhld_group_size)
        return (u'Groups', relevance)

    # Category:Faces
    def _cat_face_Faces(self):
        """A single, dominant face (>= 40% coverage)."""
        result = self._info_filter['Faces']
        return (u'Faces', ((len(result) == 1) and (result[0]['Coverage'] >= .40)))

    # Category:Portraits
    def _cat_face_Portraits(self):
        """A single face with portrait-like coverage (>= 20%)."""
        result = self._info_filter['Faces']
        return (u'Portraits', ((len(result) == 1) and (result[0]['Coverage'] >= .20)))

    # Category:Barcode
    def _cat_code_Barcode(self):
        relevance = bool(self._info_filter['OpticalCodes'])
        return (u'Barcode', relevance)

    # Category:Chessboards
    def _cat_chess_Chessboards(self):
        relevance = bool(self._info_filter['Chessboard'])
        return (u'Chessboards', relevance)

    # Category:Books (literature) in PDF
    def _cat_text_BooksPDF(self):
        """Multi-page (>= 10) PDF with a large amount of detected text."""
        pdf = u'PDF' in self._info_filter['Properties'][0]['Format']
        result = self._info_filter['Text']
        relevance = pdf and len(result) and \
                    (self._info_filter['Properties'][0]['Length'] >= 10) and \
                    (result[0]['Size'] >= 5E4) and (result[0]['Lines'] >= 1000)
        return (u'Books (literature) in PDF', relevance)

    # Category:Animated GIF
    # Category:Animated PNG
    # (Category:Animated SVG)
    def _cat_prop_Animated_general(self):
        """Multi-frame GIF/PNG files."""
        result = self._info_filter['Properties']
        relevance = result and (result[0]['Length'] > 1) and \
                    (result[0]['Format'] in [u'GIF', u'PNG'])
        # FIX: guard against an empty 'Properties' list; the category name
        # is unused by the caller then (relevance is falsy), but the old
        # unconditional 'result[0]' access raised IndexError
        fmt = result[0]['Format'] if result else u''
        return (u'Animated %s' % fmt, relevance)

    # Category:Human ears
    def _cat_ears_HumanEars(self):
        relevance = bool(self._info_filter['Ears'])
        return (u'Human ears', relevance)

    # Category:Human eyes
    def _cat_eyes_HumanEyes(self):
        relevance = bool(self._info_filter['Eyes'])
        return (u'Human eyes', relevance)

    # Category:Ogg sound files
    def _cat_streams_OggSoundFiles(self):
        """Exactly one audio stream that is not MIDI."""
        result = self._info_filter['Streams']
        return (u'Ogg sound files', ((len(result) == 1) and
                                     (u'audio/' in result[0]['Format']) and
                                     (u'/midi' not in result[0]['Format'])))

    # Category:Videos
    def _cat_streams_Videos(self):
        """At least one video stream present."""
        result = self._info_filter['Streams']
        return (u'Videos', (True in [u'video/' in s['Format'] for s in result]))

    # Category:Graphics
    def _cat_coloraverage_Graphics(self):
        """Low gradient plus few, pronounced color peaks indicate graphics
        (black/white texts are below the lower peak bound)."""
        result = self._info_filter['ColorAverage']
        relevance = (result and result[0]['Gradient'] < 0.1) and \
                    (0.005 < result[0]['Peaks'] < 0.1)
        #(result[0]['FFT_Peaks'] < 0.2) # has to be tested first !!!
        return (u'Graphics', bool(relevance))

    # Category:MIDI files created with GNU LilyPond
    # Category:Bitmap_from_Inkscape (png) / Category:Created_with_Inkscape (svg)
    # Category:Created_with_MATLAB (png, svg) / Category:Created_with_PLOT2SVG (svg)
    # Category:Created_with_ImageMagick (jpg) / Category:Created_with_Adobe_ImageReady (png)
    # Category:Created_with_Adobe_Photoshop (jpg) / Category:Created_with_Picasa (jpg)
    # Category:Created_with_Qtpfsgui (jpg) / Category:Created_with_Autopano (jpg)
    # Category:Created_with_Xmgrace (png) / Category:Created_with_darktable (jpg)
    # Category:Created_with_easyHDR (jpg) / Category:Created_with_GIMP (jpg)
    # Category:Created_with_R (svg) / Category:Created_with_VectorFieldPlot (svg)
    # Category:Created_with_Chemtool (svg) / Category:Created_with_GNU_Octave (svg)
    # Category:Created_with_GeoGebra (svg) / Category:Created_with_Stella (png)
    # Category:Created_with_PhotoStitch (jpg) / Category:Created_with_Scribus (pdf)
    # Category:Created_with_OpenOffice.org (pdf) / Category:Created_with_Tux_Paint (pdf)
    # Category:Created_with_Microsoft_Image_Composite_Editor (jpg)
    def _cat_meta_and_history_general(self):
        """Match known creation-tool marker strings against the gathered
        metadata and history entries; may yield several categories at once
        (the caller handles list results)."""
        results = self._info_filter['Metadata'] +\
                  [{'*': item['Software']} for item in self._info_filter['History']]
        cats = set()
        # (loop variable renamed from 'magic' which shadowed the module
        # of the same name)
        for key, needle, cat in [('Desc', u"Generated automatically by: GNU LilyPond", u'MIDI files created with GNU LilyPond'),
                                 ('Software', u"www.inkscape.org", u'Bitmap from Inkscape'),
                                 ('Misc', u"org.inkscape.output.svg.inkscape", u'Created with Inkscape'), # 'Output_extension'
                                 ('Software', u"MATLAB, The Mathworks, Inc.", u'Created with MATLAB'),
                                 ('Desc', u"Matlab Figure", u'Created with MATLAB'),
                                 ('Desc', u"Converted by PLOT2SVG", u'Created with PLOT2SVG'),
                                 ('Software', u"ImageMagick", u'Created with ImageMagick'),
                                 ('Software', u"Adobe ImageReady", u'Created with Adobe ImageReady'),
                                 ('Software', u"Adobe Photoshop", u'Created with Adobe Photoshop'),
                                 ('Software', u"Picasa", u'Created with Picasa'),
                                 ('Software', u"Created with opensource tool Qtpfsgui", u'Created with Qtpfsgui'),
                                 ('Software', u"Autopano", u'Created with Autopano'),
                                 ('Software', u"Grace", u'Created with Xmgrace'),
                                 ('Software', u"darktable", u'Created with darktable'),
                                 ('Software', u"Tux Paint", u'Created with Tux Paint'),
                                 ('Software', u"Microsoft ICE", u'Created with Microsoft Image Composite Editor'),
                                 ('Software', u"easyHDR", u'Created with easyHDR'),
                                 ('Comment', u"easyHDR", u'Created with easyHDR'),
                                 ('Software', u"GIMP", u'Created with GIMP'),
                                 ('Comment', u"Created with GIMP", u'Created with GIMP'),
                                 ('Desc', u"R SVG", u'Created with R'),
                                 ('Desc', u"created with VectorFieldPlot", u'Created with VectorFieldPlot'),
                                 ('Desc', u"Created with Chemtool", u'Created with Chemtool'),
                                 ('Desc', u"Produced by GNUPLOT", u'Created with GNU Octave'),
                                 ('Misc', u"geogebra.d.W", u'Created with GeoGebra'), # 'DescProducer'
                                 ('Comment', u"Created using Stella4D", u'Created with Stella'),
                                 ('Comment', u"LEAD Technologies Inc.", u'Created with PhotoStitch'),
                                 ('Producer', u"Scribus PDF Library", u'Created with Scribus'),
                                 ('Producer', u"OpenOffice.org", u'Created with OpenOffice.org'),]:
            for result in results:
                # '*' entries (from 'History') match regardless of the key
                relevance = ((key in result) or ('*' in result)) and \
                            (needle in result.get(key, result.get('*')))
                if relevance:
                    cats.add( cat )
        return (list(cats), bool(len(cats)))

    # Category:Categorized by DrTrigonBot
    def _addcat_BOT(self):
        """Marker category; - ALWAYS - added."""
        return (u"Categorized by DrTrigonBot", True)

    # (Category:BMP) / (Category:PNG) / (Category:JPEG)
    # Category:TIFF files
    # (may be more image formats/extensions according to PIL, e.g. SVG, ...)
    # Category:PDF files
    def _addcat_prop_general(self):
        """File-format category for selected formats (TIFF, PDF) only."""
        fmt = self._info_filter['Properties'][0]['Format']
        if u'TIFF' in fmt:
            fmt = u'TIFF images'
        #elif u'SVG' in fmt:
        #    # additional to PIL (rsvg, ...)
        #    # should be added as template instead of category (!)
        #    fmt = u''
        elif u'PDF' in fmt:
            # additional to PIL (...)
            fmt = u'PDF files'
        else:
            # disable ALL categorization, except the listed exceptions above
            # (BMP, PNG, JPEG, OGG; no general catgeory available, ...)
            fmt = u''
        # PIL: http://www.pythonware.com/library/pil/handbook/index.htm
        return (fmt, bool(fmt))

    # TODO: add templates (conditional/additional like 'addcat'), e.g.
    #       {{ValidSVG}}/{{InvalidSVG}} -> Category:Valid SVG/Invalid SVG
    #       (see the removed '_addtempl_prop_SVN' draft in the VCS history)
    # TODO: '_guess_Classify_*' stubs for Unidentified people, maps, flags,
    #       plants, coats of arms, buildings, trains, automobiles and buses
    #       (see the removed drafts in the VCS history)

    # Category:Human legs
    def _guess_legs_HumanLegs(self):
        result = self._info_filter['Legs']
        return (u'Human legs', ((len(result) == 1) and (result[0]['Coverage'] >= .40)))

    # Category:Human torsos
    def _guess_torsos_HumanTorsos(self):
        result = self._info_filter['Torsos']
        return (u'Human torsos', ((len(result) == 1) and (result[0]['Coverage'] >= .40)))

    # Category:Automobiles
    def _guess_automobiles_Automobiles(self):
        result = self._info_filter['Automobiles']
        return (u'Automobiles', ((len(result) == 1) and (result[0]['Coverage'] >= .40)))

    ## Category:Hands ('Hands' detects any kind of skin; disabled, see above)
    #def _guess_hands_Hands(self):
    #    result = self._info_filter['Hands']
    #    return (u'Hands', ((len(result) == 1) and (result[0]['Coverage'] >= .50)))

    # Basic color categories (RGB reference values, cmp.
    # http://www.farb-tabelle.de/en/table-of-color.htm):
    # Black(0,0,0) Blue(0,0,255) Brown(165,42,42) Green(0,255,0)
    # Orange(255,165,0) Pink(255,192,203) Purple(160,32,240) Red(255,0,0)
    # Turquoise(64,224,208) White(255,255,255) Yellow(255,255,0)
    def __cat_color_general(self, col):
        """Return (col, True) if 'col' is among the detected color regions,
        (col, False) otherwise."""
        info = self._info_filter['ColorRegions']
        for item in info:
            if (col == item[u'Color']):
                return (col, True)
        return (col, False)

    _cat_color_Black     = lambda self: self.__cat_color_general(u'Black')
    _cat_color_Blue      = lambda self: self.__cat_color_general(u'Blue')
    _cat_color_Brown     = lambda self: self.__cat_color_general(u'Brown')
    _cat_color_Green     = lambda self: self.__cat_color_general(u'Green')
    _cat_color_Orange    = lambda self: self.__cat_color_general(u'Orange')
    _cat_color_Pink      = lambda self: self.__cat_color_general(u'Pink')
    _cat_color_Purple    = lambda self: self.__cat_color_general(u'Purple')
    _cat_color_Red       = lambda self: self.__cat_color_general(u'Red')
    _cat_color_Turquoise = lambda self: self.__cat_color_general(u'Turquoise')
    _cat_color_White     = lambda self: self.__cat_color_general(u'White')
    _cat_color_Yellow    = lambda self: self.__cat_color_general(u'Yellow')
# all classification and categorization methods and definitions - SVM variation
# use 'pyml' SVM (libsvm) classifier
# may be 'scikit-learn' or 'opencv' (svm, a.o.) could be of some use too
class CatImages_SVM(CatImages_Default):
    """Classification/categorization variant backed by a 'pyml' SVM
    (libsvm) classifier; 'scikit-learn' or 'opencv' (svm, a.o.) could be
    of some use here too.
    """
    trained_cat = [u'Human_ears', u'Male faces']

    def classifyFeatures(self):
        """Dummy: deactivated - every detected feature is fully trusted."""
        for entries in self._info.values():
            for entry in entries:
                entry['Confidence'] = 1.0

    # (all trained categories)
    # http://scipy-lectures.github.com/advanced/scikit-learn/index.html
    # http://mlpy.sourceforge.net/docs/3.5/index.html
    # http://docs.opencv.org/modules/ml/doc/ml.html
    def _cat_multi_generic(self):
        """Predict one of the trained categories via the stored SVM model.

        Currently only the NUMBER of detected features per key is used as
        the feature vector -> lots of room for improvements!  (It looks
        like scikit-learn may be better and have more options than pyml.)
        """
        # build the classifier feature vector: feature counts, keys sorted
        counts = [len(self._info[key]) for key in sorted(self._info)]
        feature_vec = np.array(counts)
        svm_model = mlpy.LibSvm().load_model('cache/test.csf')
        predicted = svm_model.pred(feature_vec)
        #print svm_model.labels()
        # labels are 1-based indices into trained_cat; confidence of match?
        return (self.trained_cat[int(predicted) - 1], True)
# Image by content categorization derived from 'checkimages.py'.
class CatImagesBot(checkimages.checkImagesBot, CatImages_Default):
#class CatImagesBot(checkimages.checkImagesBot, CatImages_SVM):
# def __init__(self, site, logFulNumber = 25000, sendemailActive = False,
# duplicatesReport = False, logFullError = True): pass
# def setParameters(self, imageName): pass
# or may be '__init__' ... ???
    def load_licenses(self):
        """Override of checkimages' license loading; used here to list the
        bot procedures ('_filter_*', '_cat_*', '_addcat_*', '_guess_*'
        methods, collected into self._funcs) and the specialized
        'FileContentsByBot' sub-templates available on the wiki
        (self.tmpl_available_spec).

        @return: empty list (no licenses are loaded by this bot)
        """
        #pywikibot.output(u'\n\t...Listing the procedures available...\n')
        pywikibot.output(u'\n\t...Listing the procedures used...\n')
        self._funcs = {'filter': [], 'cat': [], 'addcat': [], 'guess': []}
        # collect methods named '_<group>_<topic>...' into self._funcs,
        # skipping topics listed in self.ignore
        for item in dir(self):
            s = item.split('_')
            if (len(s) < 3) or (s[1] not in self._funcs) or (s[2] in self.ignore):
                continue
            pywikibot.output( item )
            self._funcs[s[1]].append( item )
        # 'tmpl_available_spec' here is a module level default (defined
        # outside this chunk); replaced below when the wiki lists templates
        self.tmpl_available_spec = tmpl_available_spec
        gen = pagegenerators.PrefixingPageGenerator(prefix = u'Template:FileContentsByBot/')
        buf = []
        for item in gen:
            item = item.title()
            if (item[-4:] == "/doc"): # all docs
                continue
            item = os.path.split(item)[1]
            # lowercase first letter marks non-specialized templates
            if (item[0].lower() == item[0]): # e.g. 'generic'
                continue
            buf.append( item )
        if buf:
            self.tmpl_available_spec = buf
            pywikibot.output( u'\n\t...Following specialized templates found, check them since they are used now...\n' )
            pywikibot.output( u'tmpl_available_spec = [ %s ]\n' % u", ".join(buf) )
        return []
    def downloadImage(self):
        """Download the media file into the local cache (self.image_path),
        unless it is already cached, and keep wikimedia's latest file info
        (mime, sha1, metadata, ...) in self._wikidata.
        """
        #print self.image_path
        pywikibot.output(u'Processing media %s ...' % self.image.title(asLink=True))
        image_filename = os.path.split(self.image.fileUrl())[-1]
        # cache file name limited to the last 128 chars of the url name
        self.image_path = urllib2.quote(os.path.join(scriptdir, ('cache/' + image_filename[-128:])))
        self._wikidata = self.image._latestInfo # all info wikimedia got from content (mime, sha1, ...)
        #print self._wikidata
        #print self._wikidata['mime']
        #print self._wikidata['sha1']
        #print self._wikidata['metadata']
        #for item in self._wikidata['metadata']:
        #    print item['name'], item['value']
        if not os.path.exists(self.image_path):
            pywikibot.get_throttle()
            f_url, data = self.site.getUrl(self.image.fileUrl(), no_hostname=True,
                                           back_response=True)
            # needed patch for 'getUrl' applied upstream in r10441
            # (allows to re-read from back_response)
            data = f_url.read()
            del f_url # free some memory (no need to keep a copy...)
            f = open(self.image_path, 'wb')
            f.write( data )
            f.close()
# LOOK ALSO AT: checkimages.CatImagesBot.checkStep
# (and category scripts/bots too...)
    def checkStep(self):
        """Gather all features of the current media file, classify them and
        derive the categories to apply.

        Fills self._info (for LOG/DEBUG output only), self._info_filter
        (used for categorization) and the category result lists
        self._result_check (reliable), self._result_add (conditional, does
        not trigger a report) and self._result_guess (unreliable guesses).

        @return: list of reliably detected category names
        """
        self.thrshld = self._thrshld_default
        self._info = {} # used for LOG/DEBUG OUTPUT ONLY
        self._info_filter = {} # used for CATEGORIZATION
        self._result_check = []
        self._result_add = []
        self._result_guess = []
        # flush internal buffers (left over from the previous file)
        for attr in ['_buffer_EXIF', '_buffer_FFMPEG', '_buffer_Geometry']:#, '_content_text']:
            if hasattr(self, attr):
                delattr(self, attr)
        # gather all features (information) related to current image
        self.gatherFeatures()
        # classification of detected features (should use RTrees, KNearest, Boost, SVM, MLP, NBayes, ...)
        # ??? (may be do this in '_cat_...()' or '_filter_...()' ?!?...)
        # http://opencv.itseez.com/doc/tutorials/ml/introduction_to_svm/introduction_to_svm.html
        # http://stackoverflow.com/questions/8687885/python-opencv-svm-implementation
        # https://code.ros.org/trac/opencv/browser/trunk/opencv/samples/python2/letter_recog.py?rev=6480
        self.classifyFeatures() # assign confidences
        # replace/improve this with RTrees, KNearest, Boost, SVM, MLP, NBayes, ...
        # information template: use filter to select from gathered features
        # the ones that get reported
        self._info_filter = {}
        for item in self._funcs['filter']:
            self._info_filter.update( getattr(self, item)() )
        # categorization: use explicit searches for classification (rel = ?)
        for item in self._funcs['cat']:
            (cats, rel) = getattr(self, item)()
            #print cat, result, len(result)
            if not isinstance(cats, list): # because of 'Histroy' and '_cat_meta_and_history_general'
                cats = [cats] # which return multiple results...
            if rel:
                for cat in cats:
                    self._result_check.append( cat )
        self._result_check = list(set(self._result_check))
        # categorization: conditional (only if the ones before are present)
        # (does not trigger report to page)
        for item in self._funcs['addcat']:
            (cat, rel) = getattr(self, item)()
            #print cat, result, len(result)
            if rel:
                self._result_add.append( cat )
        self._result_add = list(set(self._result_add))
        # categorization: use guesses for unreliable classification (rel = 0.1)
        # 'useGuesses' is a module level option flag (defined outside this chunk)
        if not useGuesses:
            return self._result_check
        for item in self._funcs['guess']:
            (cat, rel) = getattr(self, item)()
            #print cat, result, len(result)
            if rel:
                self._result_guess.append( cat )
        return self._result_check
    def tag_image(self):
        """Write the gathered content info and categories to the file page.

        Appends a 'FileContentsByBot' template block after the Information/
        Artwork template, adds the detected categories plus the 'Check
        categories' maintenance template; unreliable guesses are appended
        as HTML comments only.

        @return: True if the page was updated, False otherwise
        """
        self.clean_cache()
        #if not self._existInformation(self._info_filter): # information available?
        if not (self._result_check + self._result_guess): # category available?
            return False
        pywikibot.get_throttle()
        content = self.image.get()
        # check the type of template used on page; Information, Artwork, ...
        for temp in [u"Information", u"Artwork"]:
            pos = content.find(u'{{%s' % temp) + 2
            if pos > 1:
                break
        if pos > 1:
            # cosmetic changes: format the page well to have '\n\n' after the template
            # (walk forward to the template's closing '}}' by brace counting)
            diff = content[:(pos-2)].count(u'{{') - content[:(pos-2)].count(u'}}')
            while (content[:pos].count(u'{{') - content[:pos].count(u'}}')) != diff:
                pos = content.find(u'}}', pos) + 2
            if content[pos:(pos+2)] != (u"\n"*2):
                content = content[:pos] + (u"\n"*2) + content[pos:].lstrip()
        else:
            pywikibot.warning(u'Page layout issue; Information template could '
                              u'not be found and thus the data not appended!')
            return False
        # append template and fill it with data
        content = self._append_to_template(content, temp, tmpl_FileContentsByBot)
        for i, key in enumerate(self._info_filter):
            item = self._info_filter[key]
            info = self._make_infoblock(key, item)
            if info:
                content = self._append_to_template(content, u"FileContentsByBot", info)
        # append categories
        tags = set([])
        for i, cat in enumerate(list(set(self._result_check + self._result_add))):
            tags.add( u"[[:Category:%s]]" % cat )
            content = pywikibot.replaceCategoryLinks(content, [cat], site=self.site, addOnly=True)
        # cleanup double categories, remove obsolete ones and add templates
        content = pywikibot.replaceCategoryLinks( content,
                                                  list(set(pywikibot.getCategoryLinks(content, site=self.site))),
                                                  site=self.site )
        content = self._remove_category_or_template(content, u"Uncategorized") # template
        content = self._add_template(content, u"Check categories|year={{subst:#time:Y}}|month={{subst:#time:F}}|day={{subst:#time:j}}|category=[[Category:Categorized by DrTrigonBot]]", top=True)
        # add category guesses (as HTML comments, not live categories)
        for i, cat in enumerate(self._result_guess):
            content += u"\n<!--DrTrigonBot-guess-- [[Category:%s]] -->" % cat
        # verbosely output info about changes and apply them
        pywikibot.output(u"--- " * 20)
        pywikibot.output(content)
        pywikibot.output(u"--- " * 20)
        pywikibot.put_throttle()
        self.image.put( content, comment="bot automatic categorization; adding %s" % u", ".join(tags),
                        botflag=False )
        # TODO: (work-a-round if https://bugzilla.wikimedia.org/show_bug.cgi?id=6421 not solved)
        #    if hasattr(self, '_content_text'):
        #        textpage = pywikibot.Page(self.site, os.path.join(self.image.title(), u'Contents/Text'))
        #        textpage.put( self._content_text, comment="bot adding content from %s" % textpage.title(asLink=True),
        #                      botflag=False )
        return True
    def log_output(self):
        """Render a wikitext log/debug section for the current media file.

        Shows the thumbnail three times with overlaid marker blocks for the
        different detections, the categorization result (green/red) and the
        complete (unfiltered) info template.

        @return: wikitext string (u'' if there is nothing worth logging)
        """
        # ColorRegions always applies here since there is at least 1 (THE average) color...
        ignore = ['Properties', 'Metadata', 'ColorAverage', 'ColorRegions', 'Geometry']
        #if not self._existInformation(self._info): # information available?
        # information available? AND/OR category available?
        if not (self._existInformation(self._info, ignore = ignore) or self._result_check):
            return u""
        ret = []
        ret.append( u"" )
        ret.append( u"== [[:%s]] ==" % self.image.title() )
        ret.append( u'{|' )
        # 1st thumb: faces (structured markers) and people (dashed)
        ret.append( u'|<div style="position:relative;">' )
        ret.append( u"[[%s|200px]]" % self.image.title() )
        ret.append( self._make_markerblock(self._info[u'Faces'], 200.,
                                           structure=['Position', 'Eyes', 'Mouth', 'Nose']) )
        ret.append( self._make_markerblock(self._info[u'People'], 200.,
                                           line='dashed') )
        ret.append( u"</div>" )
        # 2nd thumb: color regions and optical codes (dashed)
        ret.append( u'|<div style="position:relative;">' )
        ret.append( u"[[%s|200px]]" % self.image.title() )
        ret.append( self._make_markerblock(self._info[u'ColorRegions'], 200.) )
        ret.append( self._make_markerblock(self._info[u'OpticalCodes'], 200.,
                                           line='dashed') )
        ret.append( u"</div>" )
        # 3rd thumb: cascade detections (ears, eyes, legs, torsos, cars)
        ret.append( u'|<div style="position:relative;">' )
        ret.append( u"[[%s|200px]]" % self.image.title() )
        ret.append( self._make_markerblock(self._info[u'Ears'], 200.) )
        ret.append( self._make_markerblock(self._info[u'Eyes'], 200.) )
        ret.append( self._make_markerblock(self._info[u'Legs'], 200.,
                                           line='dashed') )
        ret.append( self._make_markerblock(self._info[u'Torsos'], 200.,
                                           line='dashed') )
        ret.append( self._make_markerblock(self._info[u'Automobiles'], 200.,
                                           line='dashed') )
        #ret.append( self._make_markerblock(self._info[u'Hands'], 200.,
        #                                   line='dashed') )
        ret.append( u"</div>" )
        ret.append( u'|}' )
        # green when categories were found, red otherwise
        color = {True: "rgb(0,255,0)", False: "rgb(255,0,0)"}[bool(self._result_check + self._result_guess)]
        ret.append( u"<div style='background:%s'>'''automatic categorization''': %s</div>" % (color, u", ".join(list(set(self._result_check + self._result_add)))) )
        buf = []
        for i, key in enumerate(self._info):
            item = self._info[key]
            # force the generic template rendering (tmpl_available=[])
            info = self._make_infoblock(key, item, [])
            if info:
                buf.append( info )
        ret.append( tmpl_FileContentsByBot[3:] + u"\n" + u"\n".join( buf ) + u"\n}}" )
        return u"\n".join( ret )
def clean_cache(self):
    """Delete the locally cached (downloaded) image file, if it exists."""
    if os.path.exists(self.image_path):
        os.remove(self.image_path)
# LOOK ALSO AT: checkimages.CatImagesBot.report
def report(self):
    """Tag the image and build the log output.

    Returns a (tagged, logged) tuple; 'tagged' is the result of
    tag_image() and 'logged' the wikitext from log_output().
    """
    return (self.tag_image(), self.log_output())
def _make_infoblock(self, cat, res, tmpl_available=None):
    """ Create infoblocks for pasting into wikitext from Templates
    available on the wiki.

    Nested values are flattened and numbered for output. Invalid or
    unknown values can be marked e.g. by using u'-' or None.
    Values like None, [] (empty list), ... that resolve by bool() to
    False are hidden/omitted and not outputted at all. Unknown values
    should be hidden to save space (make human readable) and be handled
    by the Templates.
    Unknown values that are NEEDED should be set to u'-' everything
    else (not needed) to None, [] and so on.
    """
    if not res:
        return u''
    # idiom fix: identity test for None instead of '== None'
    if tmpl_available is None:
        tmpl_available = self.tmpl_available_spec
    generic = (cat not in tmpl_available)
    titles = res[0].keys()
    if not titles:
        return u''
    result = []
    if generic:
        # no category-specific template on the wiki: use the generic
        # one and collect all values per column ('; '-joined)
        result.append( u"{{FileContentsByBot/generic|name=%s|" % cat )
        buf = dict([ (key, []) for key in titles ])
        for item in res:
            for key in titles:
                buf[key].append( self._output_format(item[key]) )
        for key in titles:
            result.append( u"  {{FileContentsByBot/generic|name=%s|value=%s}}" % (key, u"; ".join(buf[key])) )
    else:
        # specific template available: emit one sub-template per item
        result.append( u"{{FileContentsByBot/%s|" % cat )
        for item in res:
            result.append( u"  {{FileContentsByBot/%s" % cat )
            for key in titles:
                if item[key]: # hide/omit (work-a-round for empty 'Eyes')
                    result.append( self._output_format_flatten(key, item[key]) )
            result.append( u"  }}" )
    result.append( u"}}" )
    return u"\n".join( result )
def _output_format(self, value):
if (type(value) == type(float())):
# round/strip floats
return "%.3f" % value
else:
# output string representation of variable
return str(value)
def _output_format_flatten(self, key, value):
    """Recursively flatten a structured value into template parameter
    lines ('| key = value'); nested items get a numeric '-NN' suffix
    appended to the key.

    Uses isinstance for the container check (instead of the original
    'type(value) == type(tuple())' comparisons).
    """
    if isinstance(value, (tuple, list)):
        # flatten structured variable recursively
        return u"\n".join(
            self._output_format_flatten(key + (u"-%02i" % i), item)
            for i, item in enumerate(value))
    # end of recursion: emit a single parameter line
    return u" | %s = %s" % (key, self._output_format(value))
def _make_markerblock(self, res, size, structure=['Position'], line='solid'):
    """Build HTML <div> overlay markers for the detections in 'res',
    scaled so an image of self.image_size fits a 'size'-pixel preview.

    NOTE(review): 'structure' default is a mutable list; it is only read
    here, never mutated, so this is harmless in practice.
    """
    # same as in '_detect_Faces'
    # fixed marker color palette (BGR order; reversed below to RGB)
    colors = [ (0,0,255),
               (0,128,255),
               (0,255,255),
               (0,255,0),
               (255,128,0),
               (255,255,0),
               (255,0,0),
               (255,0,255) ]
    result = []
    for i, r in enumerate(res):
        if ('RGB' in r):
            # use the inverse of the reference color for contrast
            # NOTE(review): the key tested is 'RGB' but the value read is
            # 'RGBref' -- confirm both keys are always set together
            color = list(np.array((255,255,255))-np.array(r['RGBref']))
        else:
            color = list(colors[i%8])
        color.reverse()
        color = u"%02x%02x%02x" % tuple(color)
        #scale = r['size'][0]/size
        scale = self.image_size[0]/size
        # primary structure (e.g. 'Position') gives the outer marker box
        f = list(np.array(r[structure[0]])/scale)
        result.append( u'<div class="%s-marker" style="position:absolute; left:%ipx; top:%ipx; width:%ipx; height:%ipx; border:2px %s #%s;"></div>' % tuple([structure[0].lower()] + f + [line, color]) )
        # remaining structures (e.g. 'Eyes', 'Mouth') are drawn solid
        for ei in range(len(structure)-1):
            data = r[structure[ei+1]]
            if data and (not hasattr(data[0], '__iter__')): # Mouth and Nose are not lists
                data = [ r[structure[ei+1]] ]
            for e in data:
                e = list(np.array(e)/scale)
                result.append( u'<div class="%s-marker" style="position:absolute; left:%ipx; top:%ipx; width:%ipx; height:%ipx; border:2px solid #%s;"></div>' % tuple([structure[ei+1].lower()] + e + [color]) )
    return u"\n".join( result )
# place into 'textlib' (or else e.g. 'catlib'/'templib'...)
def _remove_category_or_template(self, text, name):
text = re.sub(u"[\{\[]{2}%s.*?[\}\]]{2}\n?" % name, u"", text)
return text
# place into 'textlib'
def _add_template(self, text, name, params={}, top=False, raw=False):
if top:
buf = [(u"{{%s}}" % name), text]
else:
if raw:
buf = [text, name]
else:
buf = [text, (u"{{%s}}" % name)]
return u"\n".join( buf )
# place into 'textlib' (or else e.g. 'catlib'/'templib'...)
def _append_to_template(self, text, name, append):
# mask/search template to append to
pattern = re.compile(u"(\{\{%s.*?\n)(\s*\}\}\n{2})" % name, flags=re.S)
template = pattern.search(text).groups()
# append to template
template = u"".join( [template[0], append, u"\n", template[1]] )
# apply changes
text = pattern.sub(template, text)
return text
# gather data from all information interfaces
def gatherFeatures(self):
    """Run property and feature extraction on the downloaded file and
    merge the results into self._info; records the image size too.
    """
    # split detection and extraction according to file types; _JpegFile, ...
    with GenericFile(self.image_path) as gf:
        gf.image = self.image # patch for _SvgFile needing url
        for func in ['getProperties', 'getFeatures']:
            result = getattr(gf, func)()
            self._info.update(result)
        self.image_size = gf.image_size
def _existInformation(self, info, ignore = ['Properties', 'Metadata', 'ColorAverage']):
result = []
for item in info:
if item in ignore:
continue
if info[item]:
result.append( item )
return result
def _filter_Properties(self):
# >>> never drop <<<
result = self._info['Properties']
return {'Properties': result}
def _filter_Metadata(self):
## >>> never drop <<<
#result = self._info['Metadata']
ok = False
for item in self._info['Metadata'][0]:
ok = ok or (self._info['Metadata'][0][item] != u'-')
return {'Metadata': self._info['Metadata'] if ok else []}
def _filter_Faces(self):
result = self._info['Faces']
if (len(result) < self._thrhld_group_size):
buf = []
for item in self._info['Faces']:
# >>> drop if below thrshld <<<
if (item['Confidence'] >= self.thrshld):
buf.append( item )
result = buf
return {'Faces': result}
def _filter_People(self):
result = self._info['People']
if (len(result) < self._thrhld_group_size):
buf = []
for item in self._info['People']:
# >>> drop if below thrshld <<<
if (item['Confidence'] >= self.thrshld):
buf.append( item )
result = buf
return {'People': result}
def _filter_ColorRegions(self):
#result = {}
result = []
for item in self._info['ColorRegions']:
## >>> drop wrost ones... (ignore all below 0.2) <<<
#if (result.get(item['Color'], {'Confidence': 0.2})['Confidence'] < item['Confidence']):
# result[item['Color']] = item
# >>> drop if below thrshld <<<
if (item['Confidence'] >= self.thrshld):
result.append( item )
#return {'ColorRegions': [result[item] for item in result]}
return {'ColorRegions': result}
def _filter_ColorAverage(self):
# >>> never drop <<<
result = self._info['ColorAverage']
return {'ColorAverage': result}
def _filter_OpticalCodes(self):
# use all, since detection should be very reliable
#result = self._info['OpticalCodes']
result = []
for item in self._info['OpticalCodes']:
# >>> drop if below thrshld <<<
if (item['Confidence'] >= self.thrshld):
result.append( item )
return {'OpticalCodes': result}
def _filter_Chessboard(self):
# use all, since detection should be very reliable
result = self._info['Chessboard']
return {'Chessboard': result}
def _filter_Text(self):
# use all, since detection should be very reliable
result = self._info['Text']
return {'Text': result}
def _filter_Legs(self):
result = []
for item in self._info['Legs']:
# >>> drop if below thrshld <<<
if (item['Confidence'] >= self.thrshld):
result.append( item )
return {'Legs': result}
def _filter_Torsos(self):
result = []
for item in self._info['Torsos']:
# >>> drop if below thrshld <<<
if (item['Confidence'] >= self.thrshld):
result.append( item )
return {'Torsos': result}
def _filter_Ears(self):
result = []
for item in self._info['Ears']:
# >>> drop if below thrshld <<<
if (item['Confidence'] >= self.thrshld):
result.append( item )
return {'Ears': result}
def _filter_Eyes(self):
result = []
for item in self._info['Eyes']:
# >>> drop if below thrshld <<<
if (item['Confidence'] >= self.thrshld):
result.append( item )
return {'Eyes': result}
def _filter_Automobiles(self):
result = []
for item in self._info['Automobiles']:
# >>> drop if below thrshld <<<
if (item['Confidence'] >= self.thrshld):
result.append( item )
return {'Automobiles': result}
def _filter_Streams(self):
# use all, (should be reliable)
result = self._info['Streams']
return {'Streams': result}
def _filter_History(self):
# use all, (should be reliable)
result = self._info['History']
return {'History': result}
# def _filter_Audio(self):
# # use all, (should be reliable)
# result = self._info['Audio']
# return {'Audio': result}
#def _filter_Geometry(self):
# result = []
# for item in self._info['Geometry']:
# # >>> drop if below thrshld <<<
# if (item['Confidence'] >= self.thrshld):
# result.append( item )
# return {'Geometry': result}
#def _filter_Hands(self):
# result = []
# for item in self._info['Hands']:
# # >>> drop if below thrshld <<<
# if (item['Confidence'] >= self.thrshld):
# result.append( item )
# return {'Hands': result}
# def _filter_Classify(self):
# from operator import itemgetter
# result = sorted(self._info['Classify'][0].items(), key=itemgetter(1))
# result.reverse()
# pywikibot.output(u' Best: %s' % result[:3] )
# pywikibot.output(u'Worst: %s' % result[-3:] )
#
# # >>> dummy: drop all (not reliable yet since untrained) <<<
# return {'Classify': []}
def main():
    """ Main function

    Parses the command line (via pywikibot.handleArgs), builds a page
    generator, then runs CatImagesBot over every generated file page:
    download, check/categorize, tag and log.  The last processed title
    is persisted to 'cache/catimages_start' so an interrupted run can
    resume where it left off.
    NOTE(review): Python 2 code ('except X, err' syntax, .decode on str).
    """
    global useGuesses
    # Command line configurable parameters
    limit = 150 # How many images to check?
    # untagged = False # Use the untagged generator
    sendemailActive = False # Use the send-email
    train = False
    generator = None
    # default
    if len(sys.argv) < 2:
        sys.argv += ['-cat']
    # debug: 'python catimages.py -debug'
    # run/test: 'python catimages.py [-start:File:abc]'
    sys.argv += ['-family:commons', '-lang:commons']
    #sys.argv += ['-noguesses']
    # try to resume last run and continue
    if os.path.exists( os.path.join(scriptdir, 'cache/catimages_start') ):
        # keep a backup of the resume marker before reading it
        shutil.copy2(os.path.join(scriptdir, 'cache/catimages_start'), os.path.join(scriptdir, 'cache/catimages_start.bak'))
        posfile = open(os.path.join(scriptdir, 'cache/catimages_start'), "r")
        firstPageTitle = posfile.read().decode('utf-8')
        posfile.close()
    else:
        firstPageTitle = None
    # Here below there are the parameters.
    for arg in pywikibot.handleArgs():
        if arg.startswith('-limit'):
            if len(arg) == 7:
                limit = int(pywikibot.input(u'How many files do you want to check?'))
            else:
                limit = int(arg[7:])
        # elif arg == '-sendemail':
        #     sendemailActive = True
        elif arg.startswith('-start'):
            if len(arg) == 6:
                firstPageTitle = None
            elif len(arg) > 6:
                firstPageTitle = arg[7:]
            #firstPageTitle = firstPageTitle.split(":")[1:]
            #generator = pywikibot.getSite().allpages(start=firstPageTitle, namespace=6)
        elif arg.startswith('-cat'):
            if len(arg) == 4:
                catName = u'Media_needing_categories'
            elif len(arg) > 4:
                catName = str(arg[5:])
            catSelected = catlib.Category(pywikibot.getSite(), 'Category:%s' % catName)
            generator = pagegenerators.CategorizedPageGenerator(catSelected, recurse = True)
        # elif arg.startswith('-untagged'):
        #     untagged = True
        #     if len(arg) == 9:
        #         projectUntagged = str(pywikibot.input(u'In which project should I work?'))
        #     elif len(arg) > 9:
        #         projectUntagged = str(arg[10:])
        elif arg == '-noguesses':
            useGuesses = False
        elif arg.startswith('-single'):
            if len(arg) > 7:
                pageName = unicode(arg[8:])
                if 'File:' not in pageName:
                    pageName = 'File:%s' % pageName
                generator = [ pywikibot.Page(pywikibot.getSite(), pageName) ]
                firstPageTitle = None
        elif arg.startswith('-train'):
            train = True
            generator = None
    # Understand if the generator is present or not.
    if not generator:
        pywikibot.output(u'no generator defined... EXIT.')
        sys.exit()
    # Define the site.
    site = pywikibot.getSite()
    # Block of text to translate the parameters set above.
    image_old_namespace = u"%s:" % site.image_namespace()
    image_namespace = u"File:"
    # A little block-statement to ensure that the bot will not start with en-parameters
    if site.lang not in project_inserted:
        pywikibot.output(u"Your project is not supported by this script. You have to edit the script and add it!")
        return
    # Defing the Main Class.
    Bot = CatImagesBot(site, sendemailActive = sendemailActive,
                       duplicatesReport = False, logFullError = False)
    # # Untagged is True? Let's take that generator
    # if untagged == True:
    #     generator = Bot.untaggedGenerator(projectUntagged, limit)
    # Ok, We (should) have a generator, so let's go on.
    # Take the additional settings for the Project
    Bot.takesettings()
    # do classifier training on good (homgenous) commons categories
    if train:
        trainbot(generator, Bot, image_old_namespace, image_namespace)
        return
    # Not the main, but the most important loop.
    outresult = []
    for image in generator:
        # skip forward until the resume title from the last run is found
        if firstPageTitle:
            if (image.title() == firstPageTitle):
                pywikibot.output( u"found last page '%s' ..." % image.title() )
                firstPageTitle = None
                continue
            else:
                #pywikibot.output( u"skipping page '%s' ..." % image.title() )
                continue
        # recover from hard crash in the run before, thus skip one more page
        if os.path.exists( os.path.join(scriptdir, 'cache/catimages_recovery') ):
            pywikibot.output( u"trying to recover from hard crash, skipping page '%s' ..." % image.title() )
            disable_recovery()
            # in case the next one has a hard-crash too...
            posfile = open(os.path.join(scriptdir, 'cache/catimages_start'), "w")
            posfile.write( image.title().encode('utf-8') )
            posfile.close()
            continue
        #comment = None # useless, also this, let it here for further developments
        try:
            imageName = image.title().split(image_namespace)[1] # Deleting the namespace (useless here)
        except IndexError:# Namespace image not found, that's not an image! Let's skip...
            try:
                imageName = image.title().split(image_old_namespace)[1]
            except IndexError:
                pywikibot.output(u"%s is not a file, skipping..." % image.title())
                continue
        Bot.setParameters(imageName) # Setting the image for the main class
        try:
            Bot.downloadImage()
        except IOError, err:
            # skip if download not possible
            pywikibot.warning(u"%s, skipped..." % err)
            continue
        except:
            # skip on any unexpected error, but report it
            pywikibot.exception(tb=True)
            pywikibot.error(u"was not able to process page %s !!!\n" %\
                            image.title(asLink=True))
            continue
        resultCheck = Bot.checkStep()
        tagged = False
        try:
            (tagged, ret) = Bot.report()
            if ret:
                outresult.append( ret )
        except AttributeError:
            pywikibot.exception(tb=True)
            pywikibot.error(u"was not able to process page %s !!!\n" %\
                            image.title(asLink=True))
        limit += -1
        if not tagged:
            # remember the last processed page for resuming the next run
            posfile = open(os.path.join(scriptdir, 'cache/catimages_start'), "w")
            posfile.write( image.title().encode('utf-8') )
            posfile.close()
        if limit <= 0:
            break
        if resultCheck:
            continue
    if outresult:
        outpage = pywikibot.Page(site, u"User:DrTrigon/User:DrTrigonBot/logging")
        #outresult = [ outpage.get() ] + outresult # append to page
        outresult = u"\n".join(outresult)
        pywikibot.output(u"Size of log page data: %s byte(s)" % len(outresult))
        # work-a-round: write pages mutliple times if content is too large in order to circumvent
        # "HTTPError: 504 Gateway Time-out" leading finally to "MaxTriesExceededError"
        # (why is that...?!?? FIX THIS in the framework core e.g. 'postForm'!)
        tmp = outresult
        while tmp:
            # split at section boundaries ('\n\n==') below the size limit
            i = np.array([m.start() for m in re.finditer(u"\n\n==", tmp)]
                         + [len(tmp)])
            #pos = i[ np.where((i - 2048*1024) <= 0)[0][-1] ] # $wgMaxArticleSize
            pos = i[ np.where((i - 500*1024) <= 0)[0][-1] ]
            pywikibot.output(u"Size of bunch to write: %s byte(s)" % len(tmp[:pos]))
            outpage.put( tmp[:pos], comment="bot writing log for last run" )
            tmp = tmp[pos:]
        if pywikibot.simulate:
            #print u"--- " * 20
            #print u"--- " * 20
            #print outresult
            # in simulation mode also keep a local copy of the log
            posfile = open(os.path.join(scriptdir, 'cache/catimages.log'), "a")
            posfile.write( outresult )
            posfile.close()
# http://scipy-lectures.github.com/advanced/scikit-learn/index.html
# http://mlpy.sourceforge.net/docs/3.5/index.html
# http://docs.opencv.org/modules/ml/doc/ml.html
# train pyml (svm), opencv BoW and haarcascade classifiers
# choose a good and meaningful featureset from extracted (better than actual one)
def trainbot(generator, Bot, image_old_namespace, image_namespace):
    """Gather a training dataset from the categories in Bot.trained_cat
    and train/visualize a classifier on it.

    Currently only the NUMBER of detections per feature key is used as
    the feature vector, with the category index (i+1) as class label.
    The data is embedded via PCA and a linear SVM is trained on the two
    principal components, then saved to 'cache/test.csf'.
    NOTE(review): Python 2 syntax; requires mlpy, matplotlib and numpy,
    and the plt.show() calls block until the figures are closed.
    """
    # IT LOOKS LIKE (MAY BE) scikit-learn IS BETTER AND HAS MORE OPTIONS THAN pyml ... ?!!!
    # gather training dataset from wiki commons categories
    trainset = []
    for i, catName in enumerate(Bot.trained_cat):
        catSelected = catlib.Category(pywikibot.getSite(), 'Category:%s' % catName)
        generator = pagegenerators.CategorizedPageGenerator(catSelected)
        for image in generator:
            try:
                imageName = image.title().split(image_namespace)[1] # Deleting the namespace (useless here)
            except IndexError:# Namespace image not found, that's not an image! Let's skip...
                try:
                    imageName = image.title().split(image_old_namespace)[1]
                except IndexError:
                    pywikibot.output(u"%s is not a file, skipping..." % image.title())
                    continue
            Bot.setParameters(imageName) # Setting the image for the main class
            try:
                Bot.downloadImage()
            except IOError, err:
                # skip if download not possible
                pywikibot.warning(u"%s, skipped..." % err)
                continue
            except Exception, err:
                # skip on any unexpected error, but report it
                pywikibot.error(u"%s" % err)
                pywikibot.error(u"was not able to process page %s !!!\n" %\
                                image.title(asLink=True))
                continue
            # gather all features (information) related to current image
            Bot._info = {}
            Bot.gatherFeatures()
            # create classifier feature set
            # !!!currently number of detected features is used only -> lots of room for improvements!!!
            # choose a good and meaningful featureset from extracted (better than actual one)
            features = []
            for key in sorted(Bot._info):
                #print key, len(self._info[key]), self._info[key]
                features.append( len(Bot._info[key]) )
            features.append( i+1 ) # category id (returned by predictor later)
            #print features
            trainset.append( features )
    trainset = np.array(trainset)
    cols = trainset.shape[1]
    # http://mlpy.sourceforge.net/docs/3.5/tutorial.html
    import matplotlib.pyplot as plt # required for plotting
    ##iris = np.loadtxt('iris.csv', delimiter=',')
    ##x, y = iris[:, :4], iris[:, 4].astype(np.int) # x: (observations x attributes) matrix, y: classes (1: setosa, 2: versicolor, 3: virginica)
    #trainset = np.loadtxt('cache/test.csv', delimiter=' ')
    #cols = trainset.shape[1]
    #print trainset
    x, y = trainset[:, :(cols-1)], trainset[:, (cols-1)].astype(np.int) # x: (observations x attributes) matrix, y: classes (1: setosa, 2: versicolor, 3: virginica)
    pywikibot.output(x.shape)
    pywikibot.output(y.shape)
    # Dimensionality reduction by Principal Component Analysis (PCA)
    pca = mlpy.PCA() # new PCA instance
    pca.learn(x) # learn from data
    z = pca.transform(x, k=2) # embed x into the k=2 dimensional subspace
    pywikibot.output(z.shape)
    plt.set_cmap(plt.cm.Paired)
    fig1 = plt.figure(1)
    title = plt.title("PCA on dataset")
    plot = plt.scatter(z[:, 0], z[:, 1], c=y)
    labx = plt.xlabel("First component")
    laby = plt.ylabel("Second component")
    plt.show()
    # Learning by Kernel Support Vector Machines (SVMs) on principal components
    linear_svm = mlpy.LibSvm(kernel_type='linear') # new linear SVM instance
    linear_svm.learn(z, y) # learn from principal components
    # !!! train also BoW (bag-of-words) in '_detectclassify_ObjectAll' resp. 'opencv.BoWclassify.main' !!!
    # visualize the decision regions over a dense grid of the 2D subspace
    xmin, xmax = z[:,0].min()-0.1, z[:,0].max()+0.1
    ymin, ymax = z[:,1].min()-0.1, z[:,1].max()+0.1
    xx, yy = np.meshgrid(np.arange(xmin, xmax, 0.01), np.arange(ymin, ymax, 0.01))
    zgrid = np.c_[xx.ravel(), yy.ravel()]
    yp = linear_svm.pred(zgrid)
    plt.set_cmap(plt.cm.Paired)
    fig2 = plt.figure(2)
    title = plt.title("SVM (linear kernel) on principal components")
    plot1 = plt.pcolormesh(xx, yy, yp.reshape(xx.shape))
    plot2 = plt.scatter(z[:, 0], z[:, 1], c=y)
    labx = plt.xlabel("First component")
    laby = plt.ylabel("Second component")
    limx = plt.xlim(xmin, xmax)
    limy = plt.ylim(ymin, ymax)
    plt.show()
    linear_svm.save_model('cache/test.csf')
    pywikibot.output(u'Linear SVM model stored to %s.' % 'cache/test.csf')
# for functions in C/C++ that might crash hard without any exception throwed
# e.g. an abort due to an assert or something else
def enable_recovery():
    """Create (truncate) the crash-marker file so the next run knows a
    hard crash happened while processing the current page."""
    with open(os.path.join(scriptdir, 'cache/catimages_recovery'), "w") as marker:
        marker.write('')
def disable_recovery():
    """Remove the crash-marker file created by enable_recovery(), if present."""
    marker_path = os.path.join(scriptdir, 'cache/catimages_recovery')
    if os.path.exists(marker_path):
        os.remove(marker_path)
# Main loop will take all the (name of the) images and then i'll check them.
if __name__ == "__main__":
    # record the start time (second resolution, via strptime round-trip)
    old = datetime.datetime.strptime(str(datetime.datetime.utcnow()).split('.')[0], "%Y-%m-%d %H:%M:%S") #timezones are UTC
    if sys.exc_info()[0]: # re-raise ImportError
        raise #
    try:
        main()
    finally:
        # always report the elapsed time and shut the bot down cleanly
        final = datetime.datetime.strptime(str(datetime.datetime.utcnow()).split('.')[0], "%Y-%m-%d %H:%M:%S") #timezones are UTC
        delta = final - old
        secs_of_diff = delta.seconds
        pywikibot.output("Execution time: %s" % secs_of_diff)
        pywikibot.stopme()
| 46.656278
| 273
| 0.551812
|
acff4caedc607a1fbe01b2bb3083d0046818e2a5
| 673
|
py
|
Python
|
multilineage_organoid/consts.py
|
david-a-joy/multilineage-organoid
|
9b9848cfa5ee0d051b2a9645f9ffd8b9423beec8
|
[
"BSD-3-Clause"
] | 2
|
2020-08-13T18:09:53.000Z
|
2021-12-31T22:36:07.000Z
|
multilineage_organoid/consts.py
|
david-a-joy/multilineage-organoid
|
9b9848cfa5ee0d051b2a9645f9ffd8b9423beec8
|
[
"BSD-3-Clause"
] | null | null | null |
multilineage_organoid/consts.py
|
david-a-joy/multilineage-organoid
|
9b9848cfa5ee0d051b2a9645f9ffd8b9423beec8
|
[
"BSD-3-Clause"
] | null | null | null |
""" Shared default parameters across the modules """
SIGNAL_TYPE = 'F/F0' # F-F0, F/F0, F-F0/F0
LINEAR_MODEL = 'ransac' # 'least_squares' or 'ransac' or 'exp_ransac'
DATA_TYPE = 'ca' # one of 'ca', 'ephys'
FILTER_ORDER = 1 # Order of the butterworth filter
FILTER_CUTOFF = 4 # Hz cutoff for the filter
MIN_STATS_SCORE = 0.2 # Cutoff for peaks to be sufficiently wavy
FIGSIZE = 8 # inches - the width of a (square) figure panel
PLOT_SUFFIX = '.png'
SAMPLES_AROUND_PEAK = 25 # Minimum number of samples around a peak before the next peak can start
TIME_SCALE = 1000 # milliseconds / second
DEBUG_OPTIMIZER = False # If True, print optimizer debug messages
| 30.590909
| 98
| 0.717682
|
acff4d349e688667b0ea494459dda99a9eb3a688
| 1,127
|
py
|
Python
|
setup.py
|
ZhaoNeil/prometheus-grafana-deploy
|
c8ec609970d64f8d4104d8810dc301aff3df9232
|
[
"MIT"
] | null | null | null |
setup.py
|
ZhaoNeil/prometheus-grafana-deploy
|
c8ec609970d64f8d4104d8810dc301aff3df9232
|
[
"MIT"
] | null | null | null |
setup.py
|
ZhaoNeil/prometheus-grafana-deploy
|
c8ec609970d64f8d4104d8810dc301aff3df9232
|
[
"MIT"
] | 1
|
2022-01-10T14:51:10.000Z
|
2022-01-10T14:51:10.000Z
|
import setuptools
import os
def read(fname):
    """Return the text content of *fname*, resolved relative to the
    directory containing this setup.py.

    Uses a context manager so the file handle is always closed (the
    original left it open).
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as f:
        return f.read()
# dependencies come from requirements.txt, one package spec per line
install_requires = [x for x in read('requirements.txt').strip().split('\n') if x]
setuptools.setup(
    name='prometheus_grafana_deploy',
    version='0.1.1',
    author='Sebastiaan Alvarez Rodriguez',
    author_email='a@b.c',
    description='Prometheus+Grafana monitoring deployment tool for Prometheus, using metareserve reservation system',
    long_description=read('README.md'),
    long_description_content_type='text/markdown',
    url='https://github.com/Sebastiaan-Alvarez-Rodriguez/prometheus-grafana-deploy',
    packages=setuptools.find_packages(),
    package_dir={'': '.'},
    classifiers=(
        'Environment :: Console',
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
    ),
    install_requires=install_requires,
    # console entry point: the 'grafana-monitor' command
    entry_points={
        'console_scripts': [
            'grafana-monitor = prometheus_grafana_deploy.cli.entrypoint:main',
        ],
    },
)
| 32.2
| 117
| 0.669033
|
acff4d35134a2b25dc213926a13cc5318682e00a
| 12,480
|
py
|
Python
|
faker/providers/address/ne_NP/__init__.py
|
StabbarN/faker
|
57882ff73255cb248d8f995b2abfce5cfee45ab3
|
[
"MIT"
] | 4
|
2020-09-23T15:48:00.000Z
|
2021-02-25T07:55:23.000Z
|
faker/providers/address/ne_NP/__init__.py
|
StabbarN/faker
|
57882ff73255cb248d8f995b2abfce5cfee45ab3
|
[
"MIT"
] | 10
|
2020-03-24T10:47:53.000Z
|
2021-04-08T19:51:44.000Z
|
faker/providers/address/ne_NP/__init__.py
|
StabbarN/faker
|
57882ff73255cb248d8f995b2abfce5cfee45ab3
|
[
"MIT"
] | 1
|
2020-10-26T11:00:22.000Z
|
2020-10-26T11:00:22.000Z
|
from .. import Provider as AddressProvider
class Provider(AddressProvider):
building_number_formats = ('#', '##', '###')
street_name_formats = ('{{last_name}} {{street_suffix}}',)
street_address_formats = ('{{street_name}}',)
city_formats = ('{{city}}',)
# http://www.nepalpost.gov.np/index.php/postal-codes-of-nepal
postcode_formats = ('#####',)
address_formats = (
"{{street_name}} {{building_prefix}} {{building_number}} \n{{city}}\n{{district}} {{postcode}}",
)
street_suffixes = (
'मार्ग',
'आश्रम',
'बाटो',
'पथ',
'गल्ली',
'गेट',
'हाईट',
'टार',
'रोड',
'कुना',
'चौर',
'निवास',
)
building_prefixes = ('वडा', 'घर')
# https://en.wikipedia.org/wiki/List_of_sovereign_states
countries = (
'अंगोला',
'अक्रोटिरी र धेकेलिया',
'अजरबैजान',
'अफगानिस्तान',
'अमेरिकी सामोआ',
'अरुबा',
'अर्जेन्टिना',
'अर्मेनिया',
'अलडेर्नी',
'अल्जेरिया',
'अल्बानिया',
'अस्ट्रिया',
'अस्ट्रेलिया',
'आइजल अफ म्यान',
'आइभोरी कोस्ट',
'आइसल्याण्ड',
'आजाद कश्मीर',
'आयरल्याण्ड',
'इक्वेटोरियल गिनी',
'इक्वेडर',
'इजरायल',
'इटाली',
'इण्डोनेशिया',
'इथियोपिया',
'इराक',
'इरान',
'इस्टोनिया',
'उज्बेकिस्तान',
'उत्तर कोरिया',
'उत्तरी मारिआना टापु',
'उत्तरी साइप्रस',
'उरुग्वे',
'एङगुइला',
'एण्डोरा',
'एन्टिगुआ र बर्बुडा',
'एरिट्रिया',
'एल साल्भादोर',
'एशमोर र कर्टियर टापु',
'ओमान',
'कजाख्स्तान',
'कतार',
'कम्बोडिया',
'किरिबाटी',
'किर्गिजस्तान',
'कुक द्वीप',
'कुराकाओ',
'कुवैत',
'केन्या',
'केप भर्ड',
'केम्यान टापु',
'कोकोस टापु',
'कोटे डी आइभोरी',
'कोमोरोस',
'कोरल सी टापु क्षेत्र',
'कोलम्बिया',
'कोसोभो',
'कोस्टारिका',
'क्यानडा',
'क्यामेरून',
'क्युबा',
'क्रिसमस टापु',
'क्रोएसिया',
'क्लिप्परटन द्वीप',
'क्वीन माउड ल्याण्ड',
'गणतन्त्र कङ्गो',
'गणतन्त्र कोरिया',
'गणतन्त्र स्पर्स्का',
'गाबोन',
'गिनी',
'गिब्राल्टार',
'गिलगीत',
'गुयना',
'गुर्न्जी',
'ग्रिनाडा',
'ग्रीनल्याण्ड',
'ग्रीस',
'ग्वाटेमाला',
'ग्वाम',
'घाना',
'चाड',
'चिली',
'चीन',
'चेक गणतन्त्र',
'जमैका',
'जर्मनी',
'जर्सी',
'जापान',
'जाम्बिया',
'जिबुटी',
'जोर्डन',
'टर्की',
'टिमोर',
'टुभालु',
'टुर्क्स तथा काइकोस टापु',
'टोंगा',
'टोकेलाउ',
'टोगो',
'ट्युनिसिया',
'ट्रान्सनिसट्रिया',
'ट्रिनिडाड र टोबागो',
'डेनमार्क',
'डोमिनिकन गणतन्त्र',
'डोमिनिका',
'तन्जानिया',
'ताइवान',
'ताजिकिस्तान',
'तुर्कमेनिस्तान',
'थाइल्याण्ड',
'दक्षिण अफ्रिका',
'दक्षिण ओसेटिया',
'दक्षिण कोरिया',
'दक्षिण जर्जिया तथा दक्षिण स्याण्डवीच टापु',
'दक्षिणी सुडान',
'नर्वे',
'नर्वेको',
'नाइजर',
'नाइजेरिया',
'नाउरु',
'नागोर्नो',
'नामिबिया',
'निकाराग्वा',
'नियु',
'नेदरल्याण्ड',
'नेपाल',
'नोर्फोक टापु',
'न्यु क्यालोडेनिया',
'न्युजिल्यान्ड',
'पपुवा न्युगिनी',
'पलाउ',
'पाकिस्तान',
'पानामा',
'पाराग्वे',
'पिटकेर्न टापु',
'पिटर द्वीप',
'पूर्वी टिमोर',
'पेरु',
'पोर्चुगल',
'पोल्याण्ड',
'प्यालेस्टाइन',
'प्युर्तो रिको',
'प्रजातान्त्रिक गणतन्त्र कंगो',
'प्रजातान्त्रिक गणतन्त्र कोरिया',
'प्रिडेनेस्ट्रोभी',
'फकल्याण्ड टापु',
'फरोइ टापु',
'फिजी',
'फिनल्याण्ड',
'फिलिपिन्स',
'फ्रान्स',
'फ्रेन्च दक्षिणी र अन्टार्कटिक द्वीप',
'फ्रेन्च पोलिनेसिया',
'बंगलादेश',
'बर्मा',
'बर्मुडा',
'बहराइन',
'बहामस',
'बार्बाडोस',
'बुरुन्डी',
'बुर्किना फासो',
'बुल्गेरिया',
'बेनिन',
'बेलारूस',
'बेलिज',
'बेल्जियम',
'बोत्स्वाना',
'बोलिभिया',
'बोस्निया र हर्जगोभिना',
'बोस्निया र हर्जगोभिना संघ',
'बौभेट द्वीप',
'ब्राजिल',
'ब्रिटिस भर्जिन टापु',
'ब्रुनेई',
'भानुअटु',
'भारत',
'भियतनाम',
'भुटान',
'भेनेजुएला',
'भ्याटिकन',
'भ्याटिकन सिटी',
'मकाउ',
'मङ्गोलिया',
'मध्य अफ्रिकी गणतन्त्र',
'मलावी',
'मलेशिया',
'माइक्रोनेसियाको संघीय राज्य',
'माडागास्कर',
'मार्शल द्वीप',
'माली',
'माल्टा',
'माल्दिभ्स',
'मिश्र',
'मेक्सिको',
'मोजाम्बिक',
'मोनाको',
'मोन्टसेराट',
'मोन्टेनेग्रो',
'मोरक्को',
'मोल्डोभा',
'मौरिसनिया',
'मौरिसस',
'म्यानमार',
'म्यासेडोनिया',
'यमन',
'युक्रेन',
'युगान्डा',
'रसिया',
'रुवाण्डा',
'रोमानिया',
'रोस डिपेन्डेन्सी',
'लक्जेम्बर्ग',
'लाईबेरिया',
'लाओस',
'लात्भिया',
'लिचटेन्स्टाइन',
'लिथुआनिया',
'लिबिया',
'लेबनान',
'लेसोथो',
'वाल्लिस र फुटुना',
'श्रीलंका',
'संघीय राज्य माइक्रोनेसिया',
'संयुक्त अधिराज्य',
'संयुक्त अरब इमिरेट्स',
'संयुक्त राज्य अमेरिका',
'संयुक्त राज्य भर्जिन टापु',
'सर्बिया',
'साइप्रस',
'साउदी अरब',
'साओ टोमे र प्रिन्सिपे',
'सान मारिनो',
'साबा',
'सामोआ',
'साहरवी अरब लोकतान्त्रिक गणतन्त्र',
'सिंगापुर',
'सिन्ट मार्टिन',
'सीरियन कुर्दिस्तान',
'सीरिया',
'सुडान',
'सुरिनेम',
'सेनेगल',
'सेन्ट किट्स र नेभिस',
'सेन्ट पियेर्रे र मिकुएलन',
'सेन्ट बार्थेलेमी',
'सेन्ट भिन्सेन्ट र ग्रेनाडाइन्स',
'सेन्ट मार्टिन',
'सेन्ट लुसिया',
'सेन्ट हेलेना',
'सेरा लियोन',
'सेसेल्स',
'सोमालिया',
'सोमालील्याण्ड',
'सोलोमन द्वीप',
'स्पेन',
'स्लोभाकिया',
'स्लोभेनिया',
'स्वाजिल्याण्ड',
'स्विजरल्याण्ड',
'स्वीडेन',
'हंगेरी',
'हङकङ',
'हर्म',
'हाइटी',
'हेयर्ड द्वीप र म्याकडोनाल्ड टापु',
'होन्डुरस',
'अबखाजिया',
'जर्जिया',
)
# cities are taken from
# https://en.wikipedia.org/wiki/List_of_cities_in_Nepal
cities = (
'मिर्चैया',
'प्युठान',
'कञ्चनपुर',
'लुम्बिनी सांस्कृतिक',
'बागलुङ',
'इलाम',
'भक्तपुर',
'भद्रपुर',
'घोराही',
'स्याङ्जा',
'खैरहानी नगरपालिका',
'म्याग्दी',
'रंगेली',
'काठमाडौं',
'शनि-अर्जुन',
'पर्वत',
'सप्तरी',
'पनौती',
'जयपृथ्वी',
'लहान',
'वालिङ',
'बर्दघाट',
'डोटी',
'धरान',
'पथरी शनिश्चरे',
'चन्दननाथ',
'नवलपरासी',
'किर्तिपुर',
'दैलेख',
'सुनसरी',
'बेलौरी',
'कुस्मा',
'मकवानपुर',
'कञ्चनरूप',
'गुलरिया',
'टीकापुर',
'राजापुर',
'फिदिम',
'खोटाङ',
'धनुषाधाम',
'झापा',
'पुनर्वास',
'भक्तपुर',
'बर्दिया',
'बागलुङ',
'दमक',
'तेह्रथुम',
'नारायण',
'ताप्लेजुङ',
'तानसेन',
'पाँचखाल',
'बनेपा',
'म्याङ्लुङ',
'ललितपुर',
'दिपायल',
'अपी',
'दाङ',
'सन्धिखर्क',
'धनकुटा',
'बिरेन्द्रनगर',
'गौर',
'मोरङ',
'सङ्खुवासभा',
'लम्की-चुहा',
'बारा',
'हरिवन नगरपालिका',
'मलङ्वा',
'सिराहा',
'जनकपुर',
'सल्यान',
'सिन्धुपाल्चोक',
'दुल्लु',
'ओखलढुङ्गा',
'पाल्पा',
'इटहरी',
'रेसुङगा',
'कृष्णनगर',
'शुक्लगण्डकी',
'नुवाकोट',
'साँफेबगर',
'राजविराज',
'नेपालगंज',
'भिमेश्वर',
'ताप्लेजुङ',
'धुलिखेल',
'व्यास',
'भोजपुर',
'धादिङ',
'बेनी',
'अर्घाखाँची',
'भीमदत्त',
'रौतहट',
'जलेश्वर',
'देवदह',
'बेलवारी',
'बुटवल',
'सुर्खेत',
'मङ्गलसेन',
'कैलाली',
'धनकुटा',
'रुपन्देही',
'सल्यान',
'रामपुर',
'बिराटनगर',
'चौतारा',
'देवचुली',
'कपिलवस्तु',
'सुनवल',
'शिवराज',
'चम्पापुर (चापागाउँ)',
'भरतपुर',
'गढिमाई',
'उर्लावारी',
'लेखनाथ',
'सिद्धिचरण',
'मेचीनगर',
'चित्रवन',
'कास्की',
'गौशाला',
'पुतलीबजार',
'बिदुर',
'शम्भुनाथ',
'पर्सा',
'प्युठान',
'निजगढ',
'डडेलधुरा',
'कन्काई',
'गैंडाकोट',
'पाल्पा',
'कार्यविनायक*',
'तिलोत्तमा',
'तुलसीपुर',
'वीरगञ्ज',
'शंखरपुर*',
'अत्तरिया',
'बझाङ',
'मन्थली*',
'कपिलवस्तु',
'कटारी',
'हेटौडा',
'कलैया',
'सुन्दर दुलारी',
'सिन्धुली',
'थाहा',
'बाँके',
'ललितपुर',
'दार्चुला',
'पोखरा',
'बन्दीपुर',
'सर्लाही',
'कोहलपुर',
'सैनामैना',
'अमरागढी',
'उदयपुर',
'काठमाडौं',
'सुर्योदय',
'सिराहा',
'महोत्तरी',
'धनगढी',
'शारदा',
'काभ्रेपलाञ्चोक',
'त्रियुगा',
'रामेछाप',
'पाँचथर',
'इलाम',
'भोजपुर',
'मध्यपुर ठिमी',
'दुहवी-भलुवा',
'दशरथचन्द',
'बैतडी',
'कोशी हरैंचा',
'चापाकोट',
'दिक्तेल',
'चन्द्रपुर',
'लालबन्दी',
'चितवन',
'रत्ननगर',
'पृथ्वीनारायण',
'धनुषा',
'गुल्मी',
'बेंसीशहर',
'लमजुङ',
'अछाम',
'तनहुँ',
'खाँदबारी',
'बिर्तामोड',
'कमलामाई',
'छिरेश्वरनाथ',
'सिद्धार्थनगर',
'निलकण्ठ',
'गोर्खा',
'दोलखा',
'रामग्राम',
'इनरूवा',
'कावासोती',
'बेल्टार बसाहा',
'जुम्ला',
'ईश्वरपुर',
)
districts = (
'अछाम',
'अर्घाखाँची',
'इलाम',
'उदयपुर',
'ओखलढुङ्गा',
'कञ्चनपुर',
'कपिलवस्तु',
'काठमाडौं',
'काभ्रेपलाञ्चोक',
'कालीकोट',
'कास्की',
'कैलाली',
'खोटाङ',
'गुल्मी',
'गोर्खा',
'चितवन',
'जाजरकोट',
'जुम्ला',
'झापा',
'डडेल्धुरा',
'डोटी',
'डोल्पा',
'तनहुँ',
'ताप्लेजुङ',
'तेह्रथुम',
'दाङ',
'दार्चुला',
'दैलेख',
'दोलखा',
'धनकुटा',
'धनुषा',
'धादिङ',
'नवलपरासी',
'नुवाकोट',
'पर्वत',
'पर्सा',
'पाँचथर',
'पाल्पा',
'प्युठान',
'बझाङ',
'बर्दिया',
'बाँके',
'बाग्लुङ',
'बाजुरा',
'बारा',
'भक्तपुर',
'भोजपुर',
'मकवानपुर',
'मनाङ',
'महोत्तरी',
'मुगु',
'मुस्ताङ',
'मोरङ',
'म्याग्दी',
'रसुवा',
'रामेछाप',
'रुकुम',
'रूपन्देही',
'रोल्पा',
'रौतहट',
'लमजुङ्',
'ललितपुर',
'वैतडी',
'संखुवासभा',
'सप्तरी',
'सर्लाही',
'सल्यान',
'सिन्धुपलाञ्चोक',
'सिन्धुली',
'सिराहा',
'सुनसरी',
'सुर्खेत',
'सोलुखुम्बु',
'स्याङ्जा',
'हुम्ला',
)
def district(self):
    """Return a random district of Nepal.

    :example: 'अछाम'
    """
    return self.random_element(self.districts)
def city(self):
    """Return a random Nepali city name.

    :example: 'कावासोती'
    """
    return self.random_element(self.cities)
def building_prefix(self):
    """Return a random building prefix.

    :example वडा
    """
    prefixes = self.building_prefixes
    return self.random_element(prefixes)
| 20.8
| 104
| 0.274119
|
acff4d3624a496fade2d0642041eb9e3d25e0ae0
| 10,542
|
py
|
Python
|
docs/conf.py
|
amd-yan/simple-salesforce
|
4284a84ea01549ab099dbd200076f2c4d97b8023
|
[
"Apache-2.0"
] | 1,109
|
2016-06-04T21:03:47.000Z
|
2022-03-29T03:48:46.000Z
|
docs/conf.py
|
amd-yan/simple-salesforce
|
4284a84ea01549ab099dbd200076f2c4d97b8023
|
[
"Apache-2.0"
] | 385
|
2016-05-31T05:51:02.000Z
|
2022-03-24T21:41:38.000Z
|
docs/conf.py
|
amd-yan/simple-salesforce
|
4284a84ea01549ab099dbd200076f2c4d97b8023
|
[
"Apache-2.0"
] | 531
|
2016-06-03T16:54:57.000Z
|
2022-03-23T11:07:57.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# simple-salesforce documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 22 19:18:40 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'simple-salesforce'
copyright = '2016, Nick Catalano, community contributors'
author = 'Nick Catalano, community contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.72.1'
# The full version, including alpha/beta/rc tags.
release = '0.72.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Autodoc configuration -----------------------------------------------------
def autodoc_skip_member(app, what, name, obj, skip, options):
    """Sphinx 'autodoc-skip-member' handler that always documents __init__.

    Returning False forces __init__ into the generated docs; for every
    other member the decision Sphinx already made (``skip``) is kept.
    """
    return False if name == "__init__" else skip
# -- Apidoc --------------------------------------------------------------------
def run_apidoc(_):
    """Regenerate the per-module API reference with sphinx-apidoc.

    Registered for Sphinx's 'builder-inited' event; the argument is the
    Sphinx application object, which is unused here.  Output .rst stubs
    are written into the docs directory ('.') from ../simple_salesforce.
    """
    subprocess.check_call("sphinx-apidoc --separate -f -o . ../simple_salesforce", shell=True)
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'simple-salesforce v0.72.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'simple-salesforcedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'simple-salesforce.tex', 'simple-salesforce Documentation',
'Nick Catalano, community contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'simple-salesforce', 'simple-salesforce Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'simple-salesforce', 'simple-salesforce Documentation',
author, 'simple-salesforce', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
def setup(app):
    """Sphinx extension entry point: wire up our event handlers."""
    handlers = (
        ("autodoc-skip-member", autodoc_skip_member),
        ("builder-inited", run_apidoc),
    )
    for event, handler in handlers:
        app.connect(event, handler)
| 29.283333
| 94
| 0.696927
|
acff4dadaea8d54e63b91d552c6e37556e42071a
| 3,707
|
py
|
Python
|
contrib/macdeploy/custom_dsstore.py
|
Suryanto28/Wallet
|
52bf852fce2ef470b10ebe36c5f920faf277b1d4
|
[
"MIT"
] | 8
|
2019-06-07T22:21:15.000Z
|
2020-12-16T16:02:44.000Z
|
contrib/macdeploy/custom_dsstore.py
|
Suryanto28/Wallet
|
52bf852fce2ef470b10ebe36c5f920faf277b1d4
|
[
"MIT"
] | 2
|
2020-06-23T07:13:33.000Z
|
2020-09-13T10:33:29.000Z
|
contrib/macdeploy/custom_dsstore.py
|
Suryanto28/Wallet
|
52bf852fce2ef470b10ebe36c5f920faf277b1d4
|
[
"MIT"
] | 5
|
2019-09-28T19:26:18.000Z
|
2021-06-19T09:44:02.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Builds the custom .DS_Store placed inside the distributed .dmg so the
# Finder window opens with the intended layout and view options.
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
# argv[1]: path of the .DS_Store file to write; argv[2]: volume/package name.
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
# Open the store read-write (created if it does not exist yet).
ds = DSStore.open(output_file, 'w+')
# 'bwsp' record: Finder browser-window settings for the volume root
# (fixed window bounds, all chrome except the path bar hidden).
ds['.']['bwsp'] = {
    'ShowStatusBar': False,
    'WindowBounds': '{{300, 280}, {500, 343}}',
    'ContainerShowSidebar': False,
    'SidebarWidth': 0,
    'ShowTabView': False,
    'PreviewPaneVisibility': False,
    'ShowToolbar': False,
    'ShowSidebar': False,
    'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
# Re-target the template's background-image alias at the new volume so the
# Finder background resolves inside the freshly built disk image rather
# than on the machine where the template alias was captured.
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bitcoin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
# Persist icon-view settings ('icvp'), a window serial number ('vSrn'),
# and fixed icon positions for the two items shown in the window.
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['TRBO-Qt.app']['Iloc'] = (128, 156)
# Flush everything to disk and release the store.
ds.flush()
ds.close()
| 61.783333
| 1,817
| 0.724036
|
acff4ea473d1c7e88ce403c7fccf71fc88cad0ce
| 2,157
|
py
|
Python
|
main/schema/section_analysis.py
|
kristianmk/tator
|
0eb75ee9333316b06f773de2b75e8e797a98ffdb
|
[
"MIT"
] | 50
|
2019-09-18T14:32:18.000Z
|
2022-03-31T16:26:07.000Z
|
main/schema/section_analysis.py
|
kristianmk/tator
|
0eb75ee9333316b06f773de2b75e8e797a98ffdb
|
[
"MIT"
] | 566
|
2019-09-18T16:33:40.000Z
|
2022-03-31T20:01:38.000Z
|
main/schema/section_analysis.py
|
kristianmk/tator
|
0eb75ee9333316b06f773de2b75e8e797a98ffdb
|
[
"MIT"
] | 19
|
2019-09-21T20:08:12.000Z
|
2022-03-17T14:53:11.000Z
|
from textwrap import dedent
from rest_framework.schemas.openapi import AutoSchema
from ._errors import error_responses
from ._attributes import attribute_filter_parameter_schema
class SectionAnalysisSchema(AutoSchema):
    """OpenAPI schema generator for the section-analysis endpoint (GET only)."""

    def get_operation(self, path, method):
        # Start from the auto-generated operation and tag the GET variant.
        operation = super().get_operation(path, method)
        if method != 'GET':
            return operation
        operation['operationId'] = 'GetSectionAnalysis'
        operation['tags'] = ['Tator']
        return operation

    def get_description(self, path, method):
        # Endpoint description rendered into the generated documentation.
        return dedent("""\
        Retrieve analysis results for a media list.
        This endpoint uses objects created with the `Analysis` endpoint to perform analysis
        on filtered media lists.
        """)

    def _get_path_parameters(self, path, method):
        project_param = {
            'name': 'project',
            'in': 'path',
            'required': True,
            'description': 'A unique integer identifying a project.',
            'schema': {'type': 'integer'},
        }
        return [project_param]

    def _get_filter_parameters(self, path, method):
        # Optional media_id filter, followed by the shared attribute filters.
        media_param = {
            'name': 'media_id',
            'in': 'query',
            'required': False,
            'description': 'Unique integer identifying a media. Use this to do analyis '
                           'on a single file instead of sections.',
            'explode': False,
            'schema': {
                'type': 'array',
                'items': {
                    'type': 'integer',
                    'minimum': 1,
                },
            },
        }
        return [media_param] + attribute_filter_parameter_schema

    def _get_request_body(self, path, method):
        # GET endpoint: no request body.
        return {}

    def _get_responses(self, path, method):
        # Shared error responses plus the 200 payload for GET.
        responses = error_responses()
        if method == 'GET':
            schema_ref = {'$ref': '#/components/schemas/SectionAnalysis'}
            responses['200'] = {
                'description': 'Successful retrieval of section analysis.',
                'content': {'application/json': {'schema': schema_ref}},
            }
        return responses
| 32.681818
| 92
| 0.528048
|
acff4ed95778d5cda3a352da552d4d34456b22ba
| 13,838
|
py
|
Python
|
code/python/ETFProfileandPrices/v2/fds/sdk/ETFProfileandPrices/model/inline_response20024_data.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 6
|
2022-02-07T16:34:18.000Z
|
2022-03-30T08:04:57.000Z
|
code/python/ETFProfileandPrices/v2/fds/sdk/ETFProfileandPrices/model/inline_response20024_data.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 2
|
2022-02-07T05:25:57.000Z
|
2022-03-07T14:18:04.000Z
|
code/python/ETFProfileandPrices/v2/fds/sdk/ETFProfileandPrices/model/inline_response20024_data.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | null | null | null |
"""
Prime Developer Trial
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.ETFProfileandPrices.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.ETFProfileandPrices.exceptions import ApiAttributeError
def lazy_import():
    # Deferred import: generated model modules can reference each other, so
    # the cross-model dependency is resolved at first use (not import time)
    # and published via globals() for the type tuples below.
    from fds.sdk.ETFProfileandPrices.model.inline_response20024_exchange import InlineResponse20024Exchange
    globals()['InlineResponse20024Exchange'] = InlineResponse20024Exchange
class InlineResponse20024Data(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-constrained fields on this model.
    allowed_values = {
    }

    # No length/range/regex validations on this model.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        # Resolve cross-model imports before reporting the accepted types.
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    # This model may not be serialized as JSON null.
    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'symbol': (str,),  # noqa: E501
            'name': (str,),  # noqa: E501
            'security_type': (str,),  # noqa: E501
            'exchange': (InlineResponse20024Exchange,),  # noqa: E501
            'report_date': (date,),  # noqa: E501
            'shares': (float,),  # noqa: E501
            'market_value': (float,),  # noqa: E501
            'currency_code': (str,),  # noqa: E501
            'weight': (float,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # This model takes part in no polymorphic (oneOf/anyOf) hierarchy.
        return None

    # Maps pythonic attribute names to the JSON keys used on the wire.
    attribute_map = {
        'symbol': 'symbol',  # noqa: E501
        'name': 'name',  # noqa: E501
        'security_type': 'securityType',  # noqa: E501
        'exchange': 'exchange',  # noqa: E501
        'report_date': 'reportDate',  # noqa: E501
        'shares': 'shares',  # noqa: E501
        'market_value': 'marketValue',  # noqa: E501
        'currency_code': 'currencyCode',  # noqa: E501
        'weight': 'weight',  # noqa: E501
    }

    # Attributes only the server may set (none for this model).
    read_only_vars = {
    }

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """InlineResponse20024Data - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            symbol (str): Identifier of the holding. (when available).. [optional]  # noqa: E501
            name (str): Name of the holding.. [optional]  # noqa: E501
            security_type (str): Financial instrument type of the holding.. [optional]  # noqa: E501
            exchange (InlineResponse20024Exchange): [optional]  # noqa: E501
            report_date (date): Reporting date of the holding.. [optional]  # noqa: E501
            shares (float): Number of shares held, unadjusted for corporate actions.. [optional]  # noqa: E501
            market_value (float): Market value of the holding, unadjusted for corporate actions.. [optional]  # noqa: E501
            currency_code (str): Code representing the currency of the holding and it's in format ISO 4217. [optional]  # noqa: E501
            weight (float): Weight of the holding within the ETP.. [optional]  # noqa: E501
        """
        # Pop the framework-control kwargs so only model fields remain.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Bypass any intermediate __new__ overrides in the model hierarchy.
        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            # Optionally drop keys the spec does not declare, when the
            # configuration asks for it and no additionalProperties exist.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal attributes every instance must carry (set via object.__setattr__).
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """InlineResponse20024Data - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            symbol (str): Identifier of the holding. (when available).. [optional]  # noqa: E501
            name (str): Name of the holding.. [optional]  # noqa: E501
            security_type (str): Financial instrument type of the holding.. [optional]  # noqa: E501
            exchange (InlineResponse20024Exchange): [optional]  # noqa: E501
            report_date (date): Reporting date of the holding.. [optional]  # noqa: E501
            shares (float): Number of shares held, unadjusted for corporate actions.. [optional]  # noqa: E501
            market_value (float): Market value of the holding, unadjusted for corporate actions.. [optional]  # noqa: E501
            currency_code (str): Code representing the currency of the holding and it's in format ISO 4217. [optional]  # noqa: E501
            weight (float): Weight of the holding within the ETP.. [optional]  # noqa: E501
        """
        # Pop the framework-control kwargs so only model fields remain.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            # Optionally drop keys the spec does not declare, when the
            # configuration asks for it and no additionalProperties exist.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, direct construction rejects
            # attempts to set server-controlled (read-only) attributes.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
| 47.068027
| 133
| 0.58231
|
acff4f4b1ae957a7c61b0813ffb741de7769e6cb
| 954
|
py
|
Python
|
source/load_device.py
|
novelview9/alfred-workflow-toggle-airpods
|
b871dc6005a020a82b62291c55127822365db464
|
[
"MIT"
] | 50
|
2018-02-11T15:00:43.000Z
|
2021-12-14T21:23:58.000Z
|
source/load_device.py
|
novelview9/alfred-workflow-toggle-airpods
|
b871dc6005a020a82b62291c55127822365db464
|
[
"MIT"
] | 8
|
2018-10-19T22:15:32.000Z
|
2020-12-10T00:02:40.000Z
|
source/load_device.py
|
jasonshanks/alfred-workflow-toggle-airpods
|
8970b6ef18e6cc77bcae5c6d1a6fddb65e85cda9
|
[
"MIT"
] | 4
|
2018-06-07T11:00:28.000Z
|
2019-07-24T04:12:56.000Z
|
# encoding: utf-8
import sys
from workflow import Workflow, ICON_WEB, web
import subprocess
import argparse
def main(wf):
    """Alfred script-filter entry point.

    With a query, offer to set the query's last word as the default device
    name; without one, list paired Bluetooth devices obtained from an
    AppleScript helper, AirPods-like names ("...Pod...") first.

    Fix vs. original: the local holding the typed name was called ``input``,
    shadowing the builtin; renamed to ``device_name``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('query', nargs='*', default=None)
    args = parser.parse_args(wf.args)

    if args.query:
        query = args.query
        # The last token of the query is the device name the user typed.
        device_name = query[-1]
        wf.add_item(
            title='Setting name "{name}" as Default'.format(name=device_name),
            arg=" ".join(query),
            valid=True,
        )
    else:
        # The AppleScript prints a comma-separated list of paired devices.
        scpt_output = subprocess.check_output(
            "osascript bluetooth.scpt", shell=True).decode('utf-8')
        # Sort so entries containing 'Pod' (AirPods etc.) come first.
        devices = sorted(scpt_output.split(', '),
                         key=lambda name: 'Pod' in name, reverse=True)
        for device in devices:
            wf.add_item(
                device,
                device,
                autocomplete=device
            )
    wf.send_feedback()
if __name__ == "__main__":
wf = Workflow()
sys.exit(wf.run(main))
| 23.85
| 103
| 0.578616
|
acff4f843b132d8509947f3747db1374ed2f70a0
| 5,602
|
py
|
Python
|
Python_OCR_JE/venv/Lib/site-packages/numpy/random/tests/test_regression.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
Python_OCR_JE/venv/Lib/site-packages/numpy/random/tests/test_regression.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
Python_OCR_JE/venv/Lib/site-packages/numpy/random/tests/test_regression.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | 1
|
2021-04-26T22:41:56.000Z
|
2021-04-26T22:41:56.000Z
|
import sys
from numpy.testing import (
assert_, assert_array_equal, assert_raises,
)
from numpy import random
import numpy as np
class TestRegression:
    """Regression tests for historical bugs in the legacy ``numpy.random``
    (RandomState) API. Each test pins the ticket/issue it guards against.

    NOTE(review): several tests rely on exact global-RNG call order and
    seeding; do not reorder statements within a test.
    """

    def test_VonMises_range(self):
        # Make sure generated random variables are in [-pi, pi].
        # Regression test for ticket #986.
        for mu in np.linspace(-7., 7., 5):
            r = random.mtrand.vonmises(mu, 1, 50)
            assert_(np.all(r > -np.pi) and np.all(r <= np.pi))

    def test_hypergeometric_range(self):
        # Test for ticket #921
        assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4))
        assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0))
        # Test for ticket #5623
        args = [
            (2**20 - 2, 2**20 - 2, 2**20 - 2),  # Check for 32-bit systems
        ]
        is_64bits = sys.maxsize > 2**32
        if is_64bits and sys.platform != 'win32':
            # Check for 64-bit systems
            args.append((2**40 - 2, 2**40 - 2, 2**40 - 2))
        for arg in args:
            assert_(np.random.hypergeometric(*arg) > 0)

    def test_logseries_convergence(self):
        # Test for ticket #923
        N = 1000
        np.random.seed(0)
        rvsn = np.random.logseries(0.8, size=N)
        # these two frequency counts should be close to theoretical
        # numbers with this large sample
        # theoretical large N result is 0.49706795
        freq = np.sum(rvsn == 1) / float(N)
        msg = f'Frequency was {freq:f}, should be > 0.45'
        assert_(freq > 0.45, msg)
        # theoretical large N result is 0.19882718
        freq = np.sum(rvsn == 2) / float(N)
        msg = f'Frequency was {freq:f}, should be < 0.23'
        assert_(freq < 0.23, msg)

    def test_shuffle_mixed_dimension(self):
        # Test for trac ticket #2074
        for t in [[1, 2, 3, None],
                  [(1, 1), (2, 2), (3, 3), None],
                  [1, (2, 2), (3, 3), None],
                  [(1, 1), 2, 3, None]]:
            np.random.seed(12345)
            shuffled = list(t)
            random.shuffle(shuffled)
            expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
            assert_array_equal(np.array(shuffled, dtype=object), expected)

    def test_call_within_randomstate(self):
        # Check that custom RandomState does not call into global state
        m = np.random.RandomState()
        res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
        for i in range(3):
            np.random.seed(i)
            m.seed(4321)
            # If m.state is not honored, the result will change
            assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)

    def test_multivariate_normal_size_types(self):
        # Test for multivariate_normal issue with 'size' argument.
        # Check that the multivariate_normal size argument can be a
        # numpy integer.
        np.random.multivariate_normal([0], [[0]], size=1)
        np.random.multivariate_normal([0], [[0]], size=np.int_(1))
        np.random.multivariate_normal([0], [[0]], size=np.int64(1))

    def test_beta_small_parameters(self):
        # Test that beta with small a and b parameters does not produce
        # NaNs due to roundoff errors causing 0 / 0, gh-5851
        np.random.seed(1234567890)
        x = np.random.beta(0.0001, 0.0001, size=100)
        assert_(not np.any(np.isnan(x)), 'Nans in np.random.beta')

    def test_choice_sum_of_probs_tolerance(self):
        # The sum of probs should be 1.0 with some tolerance.
        # For low precision dtypes the tolerance was too tight.
        # See numpy github issue 6123.
        np.random.seed(1234)
        a = [1, 2, 3]
        counts = [4, 4, 2]
        for dt in np.float16, np.float32, np.float64:
            probs = np.array(counts, dtype=dt) / sum(counts)
            c = np.random.choice(a, p=probs)
            assert_(c in a)
            assert_raises(ValueError, np.random.choice, a, p=probs*0.9)

    def test_shuffle_of_array_of_different_length_strings(self):
        # Test that permuting an array of different length strings
        # will not cause a segfault on garbage collection
        # Tests gh-7710
        np.random.seed(1234)
        a = np.array(['a', 'a' * 1000])
        for _ in range(100):
            np.random.shuffle(a)
        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect()

    def test_shuffle_of_array_of_objects(self):
        # Test that permuting an array of objects will not cause
        # a segfault on garbage collection.
        # See gh-7719
        np.random.seed(1234)
        a = np.array([np.arange(1), np.arange(4)], dtype=object)
        for _ in range(1000):
            np.random.shuffle(a)
        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect()

    def test_permutation_subclass(self):
        # permutation must round-trip ndarray subclasses without mutating
        # the input (first case) and accept objects exposing __array__
        # (second case).
        class N(np.ndarray):
            pass

        np.random.seed(1)
        orig = np.arange(3).view(N)
        perm = np.random.permutation(orig)
        assert_array_equal(perm, np.array([0, 2, 1]))
        assert_array_equal(orig, np.arange(3).view(N))

        class M:
            a = np.arange(5)

            def __array__(self):
                return self.a

        np.random.seed(1)
        m = M()
        perm = np.random.permutation(m)
        assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
        assert_array_equal(m.__array__(), np.arange(5))
| 37.346667
| 78
| 0.561585
|
acff4f988be312539ee441d65f07ba07cb8c9d9e
| 738
|
py
|
Python
|
gui-testing.py
|
misterhay/VISCA-IP-Controller-GUI
|
e7dcde715f06e14a0e6dff7ab29783fb1663e582
|
[
"Unlicense"
] | 1
|
2022-01-22T17:06:38.000Z
|
2022-01-22T17:06:38.000Z
|
gui-testing.py
|
misterhay/VISCA-IP-Controller-GUI
|
e7dcde715f06e14a0e6dff7ab29783fb1663e582
|
[
"Unlicense"
] | null | null | null |
gui-testing.py
|
misterhay/VISCA-IP-Controller-GUI
|
e7dcde715f06e14a0e6dff7ab29783fb1663e582
|
[
"Unlicense"
] | 1
|
2022-01-22T17:06:45.000Z
|
2022-01-22T17:06:45.000Z
|
from tkinter import *


def print_this(x):
    """Debug callback for the Scale widget: echo the current slider value."""
    print(x)


root = Tk()
root.geometry("750x250")

# Toggle flag: 0 = pointer tracking off, 1 = tracking on.
mousejoy = 0
# Button background per pre-toggle state: red when enabling, grey when disabling.
colors = ['red','lightgrey']


def motion(event):
    """Print the pointer's x, y position; bound while joypad mode is active."""
    x, y = event.x, event.y
    print('{}, {}'.format(x, y))


def nomotion(event):
    """No-op handler used to effectively unbind pointer tracking."""
    pass


def joypad():
    """Toggle pointer tracking on/off and recolour the button accordingly."""
    global mousejoy
    # Colour reflects the state *before* the toggle below.
    b.configure(bg=colors[mousejoy])
    if mousejoy == 0:
        root.bind('<Motion>', motion)
        # root.bind('<Double-Button-1>', handler) # bind click to stopping motion tracking
        mousejoy = 1
    else:
        # Rebinding to a no-op handler disables tracking output.
        root.bind('<Motion>', nomotion)
        mousejoy = 0


b = Button(root, command=joypad, text="Testing")
b.pack(anchor=N)
scale = Scale(root, from_=7, to=-7, command=print_this)
scale.pack(anchor=CENTER)
root.mainloop()
| 19.421053
| 90
| 0.616531
|
acff4ff3891e9e92bb5092a767a22aa0da506992
| 3,911
|
py
|
Python
|
a_compile.py
|
neurolabusc/pigz-bench-python
|
9201517761bbe16940870d5e18470527e90d1cdb
|
[
"BSD-2-Clause"
] | 1
|
2020-07-09T03:55:46.000Z
|
2020-07-09T03:55:46.000Z
|
a_compile.py
|
neurolabusc/pigz-bench-python
|
9201517761bbe16940870d5e18470527e90d1cdb
|
[
"BSD-2-Clause"
] | 4
|
2020-03-25T04:14:45.000Z
|
2021-11-27T16:04:46.000Z
|
a_compile.py
|
neurolabusc/pigz-bench-python
|
9201517761bbe16940870d5e18470527e90d1cdb
|
[
"BSD-2-Clause"
] | 2
|
2020-04-26T18:04:45.000Z
|
2020-05-05T21:57:47.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import stat
import shutil
import subprocess
import platform
import zipfile
from distutils.dir_util import copy_tree
def rmtree(top):
    """Recursively delete *top* and everything beneath it.

    Unlike ``shutil.rmtree``, each file is first made user-writable so that
    read-only files (common in Windows git checkouts) can be removed.
    """
    # Walk bottom-up so every directory is empty by the time it is removed.
    for dirpath, dirnames, filenames in os.walk(top, topdown=False):
        for fname in filenames:
            full = os.path.join(dirpath, fname)
            # Clear the read-only bit, then unlink.
            os.chmod(full, stat.S_IWUSR)
            os.remove(full)
        for dname in dirnames:
            os.rmdir(os.path.join(dirpath, dname))
    os.rmdir(top)
def install_silesia_corpus():
    """Install popular Silesia corpus"""
    # Clones the Silesia compression corpus into ./silesia and unpacks each
    # zip archive it ships, leaving only the raw benchmark files behind.
    # Requires git and network access.
    basedir = os.getcwd()
    corpusdir = os.path.join(basedir, 'silesia')
    # Remove any previous download so the corpus is always rebuilt fresh.
    if os.path.isdir(corpusdir):
        rmtree(corpusdir)
    try:
        os.mkdir(corpusdir)
    except OSError:
        # NOTE(review): execution continues after a failed mkdir; the
        # subsequent chdir/extract calls would then raise — confirm intended.
        print('Creation of the directory {} failed' .format(corpusdir) )
    cmd = 'git clone https://github.com/MiloszKrajewski/SilesiaCorpus silesia'
    print("Installing "+corpusdir);
    subprocess.call(cmd, shell=True)
    os.chdir(corpusdir)
    fnm = 'README.md'
    if os.path.isfile(fnm):
        os.remove(fnm)
    ext = '.zip'
    for item in os.listdir(corpusdir):  # loop through items in dir
        print("+"+item)
        if item.endswith(ext):  # check for ".zip" extension
            file_name = os.path.abspath(item)  # get full path of files
            print(file_name)
            zip_ref = zipfile.ZipFile(file_name)  # create zipfile object
            zip_ref.extractall(corpusdir)  # extract file to dir
            zip_ref.close()  # close file
            os.remove(file_name)  # delete zipped file
    # Restore the caller's working directory.
    os.chdir(basedir)
def install_neuro_corpus():
    """Download the neuroimaging compression corpus into ./corpus.

    Clones neurolabusc/zlib-bench, copies its 'corpus' folder (minus the
    README) into a fresh 'corpus' directory under the current working
    directory, then removes the clone. Requires git and network access.
    """
    basedir = os.getcwd()
    corpusdir = os.path.join(basedir, 'corpus')
    # Start from a clean slate so stale files never mix with a new corpus.
    if os.path.isdir(corpusdir):
        rmtree(corpusdir)
    try:
        os.mkdir(corpusdir)
    except OSError:
        # BUG FIX: this message previously referenced the undefined name
        # 'exedir' (copied from compile_pigz), which raised NameError instead
        # of reporting the mkdir failure.
        print('Creation of the directory {} failed' .format(corpusdir) )
    cmd = 'git clone https://github.com/neurolabusc/zlib-bench.git'
    subprocess.call(cmd, shell=True)
    indir = os.path.join(basedir, 'zlib-bench', 'corpus')
    # The README is documentation, not benchmark data — exclude it.
    tmpfile = os.path.join(indir, 'README')
    if os.path.isfile(tmpfile):
        os.remove(tmpfile)
    copy_tree(indir, corpusdir)
    # Delete the clone; only the copied corpus is kept.
    indir = os.path.join(basedir, 'zlib-bench')
    rmtree(indir)
def compile_pigz():
    """compile variants of pigz"""
    # Builds pigz once per zlib implementation ('ng'; plus 'Cloudflare' on
    # Windows) via CMake and collects the binaries under ./exe, named
    # pigz<method>[.exe]. Requires git, cmake and a compiler toolchain.
    methods = ['ng']
    if platform.system() == 'Windows':
        methods = ['Cloudflare', 'ng']
    basedir = os.getcwd()
    exedir = os.path.join(basedir, 'exe')
    # Recreate the output directory from scratch.
    if os.path.isdir(exedir):
        rmtree(exedir)
    try:
        os.mkdir(exedir)
    except OSError:
        print ("Creation of the directory {} failed" .format(exedir) )
    pigzdir = './pigz'
    if os.path.isdir(pigzdir):
        rmtree(pigzdir)
    cmd = 'git clone https://github.com/neurolabusc/pigz'
    subprocess.call(cmd, shell=True)
    # Out-of-source CMake build directory inside the clone.
    pigzdir = os.path.join(basedir, 'pigz', 'build')
    pigzexe = os.path.join(pigzdir, 'bin', 'pigz')
    ext = ''
    if platform.system() == 'Windows':
        ext = '.exe'
    pigzexe = pigzexe + ext
    for method in methods:
        os.chdir(basedir)
        # Fresh build tree for each zlib implementation.
        if os.path.isdir(pigzdir):
            rmtree(pigzdir)
        os.mkdir(pigzdir)
        os.chdir(pigzdir)
        cmd = 'cmake -DZLIB_IMPLEMENTATION=' + method + ' ..'
        subprocess.call(cmd, shell=True)
        cmd = 'make'
        if platform.system() == 'Windows':
            cmd = 'cmake --build . --config Release'
        subprocess.call(cmd, shell=True)
        # Move the freshly built binary out, renamed by its zlib variant.
        outnm = os.path.join(exedir, 'pigz' + method + ext)
        print (pigzexe + '->' + outnm)
        shutil.move(pigzexe, outnm)
if __name__ == '__main__':
    """compile variants of pigz and sample compression corpus"""
    # Fetch both benchmark corpora first, then build every pigz variant.
    install_neuro_corpus()
    install_silesia_corpus()
    compile_pigz()
| 32.057377
| 94
| 0.618001
|
acff50e3a1d9c46827e694c8965d4c4217abea66
| 14,759
|
py
|
Python
|
utils_1/keras_layer_DecodeDetectionsFast.py
|
madhu-korada/Mobilenet-V2-SSD-keras
|
75c85f0d9a16cbf6df39ed5219551f201e8871ca
|
[
"Apache-2.0"
] | null | null | null |
utils_1/keras_layer_DecodeDetectionsFast.py
|
madhu-korada/Mobilenet-V2-SSD-keras
|
75c85f0d9a16cbf6df39ed5219551f201e8871ca
|
[
"Apache-2.0"
] | null | null | null |
utils_1/keras_layer_DecodeDetectionsFast.py
|
madhu-korada/Mobilenet-V2-SSD-keras
|
75c85f0d9a16cbf6df39ed5219551f201e8871ca
|
[
"Apache-2.0"
] | null | null | null |
'''
A custom Keras layer to decode the raw SSD prediction output. This is a modified
and more efficient version of the `DetectionOutput` layer type in the original Caffe
implementation of SSD. For a faithful replication of the original layer, please
refer to the `DecodeDetections` layer.
Copyright (C) 2018 Pierluigi Ferrari
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import division
import numpy as np
import tensorflow as tf
import keras.backend as K
from keras.engine.topology import InputSpec
from keras.engine.topology import Layer
class DecodeDetectionsFast(Layer):
    '''
    A Keras layer to decode the raw SSD prediction output.
    Input shape:
        3D tensor of shape `(batch_size, n_boxes, n_classes + 12)`.
    Output shape:
        3D tensor of shape `(batch_size, top_k, 6)`.

    NOTE(review): the graph code below uses TF1-era APIs (`tf.to_float`,
    `reduce_max(..., keep_dims=...)`, `tf.cond`/`tf.map_fn` graph style) —
    presumably this module targets TensorFlow 1.x; confirm before upgrading.
    '''

    def __init__(self,
                 confidence_thresh=0.01,
                 iou_threshold=0.45,
                 top_k=200,
                 nms_max_output_size=400,
                 coords='centroids',
                 normalize_coords=True,
                 img_height=None,
                 img_width=None,
                 **kwargs):
        '''
        All default argument values follow the Caffe implementation.
        Arguments:
            confidence_thresh (float, optional): A float in [0,1), the minimum classification confidence in a specific
                positive class in order to be considered for the non-maximum suppression stage for the respective class.
                A lower value will result in a larger part of the selection process being done by the non-maximum suppression
                stage, while a larger value will result in a larger part of the selection process happening in the confidence
                thresholding stage.
            iou_threshold (float, optional): A float in [0,1]. All boxes with a Jaccard similarity of greater than `iou_threshold`
                with a locally maximal box will be removed from the set of predictions for a given class, where 'maximal' refers
                to the box score.
            top_k (int, optional): The number of highest scoring predictions to be kept for each batch item after the
                non-maximum suppression stage.
            nms_max_output_size (int, optional): The maximum number of predictions that will be left after performing non-maximum
                suppression.
            coords (str, optional): The box coordinate format that the model outputs. Must be 'centroids'
                i.e. the format `(cx, cy, w, h)` (box center coordinates, width, and height). Other coordinate formats are
                currently not supported.
            normalize_coords (bool, optional): Set to `True` if the model outputs relative coordinates (i.e. coordinates in [0,1])
                and you wish to transform these relative coordinates back to absolute coordinates. If the model outputs
                relative coordinates, but you do not want to convert them back to absolute coordinates, set this to `False`.
                Do not set this to `True` if the model already outputs absolute coordinates, as that would result in incorrect
                coordinates. Requires `img_height` and `img_width` if set to `True`.
            img_height (int, optional): The height of the input images. Only needed if `normalize_coords` is `True`.
            img_width (int, optional): The width of the input images. Only needed if `normalize_coords` is `True`.
        '''
        if K.backend() != 'tensorflow':
            raise TypeError("This layer only supports TensorFlow at the moment, but you are using the {} backend.".format(K.backend()))

        if normalize_coords and ((img_height is None) or (img_width is None)):
            raise ValueError("If relative box coordinates are supposed to be converted to absolute coordinates, the decoder needs the image size in order to decode the predictions, but `img_height == {}` and `img_width == {}`".format(img_height, img_width))

        if coords != 'centroids':
            raise ValueError("The DetectionOutput layer currently only supports the 'centroids' coordinate format.")

        # We need these members for the config.
        self.confidence_thresh = confidence_thresh
        self.iou_threshold = iou_threshold
        self.top_k = top_k
        self.normalize_coords = normalize_coords
        self.img_height = img_height
        self.img_width = img_width
        self.coords = coords
        self.nms_max_output_size = nms_max_output_size

        # We need these members for TensorFlow: constants baked into the graph.
        self.tf_confidence_thresh = tf.constant(self.confidence_thresh, name='confidence_thresh')
        self.tf_iou_threshold = tf.constant(self.iou_threshold, name='iou_threshold')
        self.tf_top_k = tf.constant(self.top_k, name='top_k')
        self.tf_normalize_coords = tf.constant(self.normalize_coords, name='normalize_coords')
        # NOTE(review): tf.constant(None) would fail here if normalize_coords
        # is False and no image size was given — img_height/img_width appear
        # required in practice; confirm.
        self.tf_img_height = tf.constant(self.img_height, dtype=tf.float32, name='img_height')
        self.tf_img_width = tf.constant(self.img_width, dtype=tf.float32, name='img_width')
        self.tf_nms_max_output_size = tf.constant(self.nms_max_output_size, name='nms_max_output_size')

        super(DecodeDetectionsFast, self).__init__(**kwargs)

    def build(self, input_shape):
        # No trainable weights; just record the expected input spec.
        self.input_spec = [InputSpec(shape=input_shape)]
        super(DecodeDetectionsFast, self).build(input_shape)

    def call(self, y_pred, mask=None):
        '''
        Returns:
            3D tensor of shape `(batch_size, top_k, 6)`. The second axis is zero-padded
            to always yield `top_k` predictions per batch item. The last axis contains
            the coordinates for each predicted box in the format
            `[class_id, confidence, xmin, ymin, xmax, ymax]`.
        '''
        #####################################################################################
        # 1. Convert the box coordinates from predicted anchor box offsets to predicted
        #    absolute coordinates
        #####################################################################################
        # Layout of the last 12 channels (per the offset formulas below):
        # 4 predicted offsets, 4 anchor-box coords, 4 variances.

        # Extract the predicted class IDs as the indices of the highest confidence values.
        class_ids = tf.expand_dims(tf.to_float(tf.argmax(y_pred[...,:-12], axis=-1)), axis=-1)
        # Extract the confidences of the maximal classes.
        confidences = tf.reduce_max(y_pred[...,:-12], axis=-1, keep_dims=True)

        # Convert anchor box offsets to image offsets.
        cx = y_pred[...,-12] * y_pred[...,-4] * y_pred[...,-6] + y_pred[...,-8] # cx = cx_pred * cx_variance * w_anchor + cx_anchor
        cy = y_pred[...,-11] * y_pred[...,-3] * y_pred[...,-5] + y_pred[...,-7] # cy = cy_pred * cy_variance * h_anchor + cy_anchor
        w = tf.exp(y_pred[...,-10] * y_pred[...,-2]) * y_pred[...,-6] # w = exp(w_pred * variance_w) * w_anchor
        h = tf.exp(y_pred[...,-9] * y_pred[...,-1]) * y_pred[...,-5] # h = exp(h_pred * variance_h) * h_anchor

        # Convert 'centroids' to 'corners'.
        xmin = cx - 0.5 * w
        ymin = cy - 0.5 * h
        xmax = cx + 0.5 * w
        ymax = cy + 0.5 * h

        # If the model predicts box coordinates relative to the image dimensions and they are supposed
        # to be converted back to absolute coordinates, do that.
        def normalized_coords():
            xmin1 = tf.expand_dims(xmin * self.tf_img_width, axis=-1)
            ymin1 = tf.expand_dims(ymin * self.tf_img_height, axis=-1)
            xmax1 = tf.expand_dims(xmax * self.tf_img_width, axis=-1)
            ymax1 = tf.expand_dims(ymax * self.tf_img_height, axis=-1)
            return xmin1, ymin1, xmax1, ymax1
        def non_normalized_coords():
            return tf.expand_dims(xmin, axis=-1), tf.expand_dims(ymin, axis=-1), tf.expand_dims(xmax, axis=-1), tf.expand_dims(ymax, axis=-1)

        xmin, ymin, xmax, ymax = tf.cond(self.tf_normalize_coords, normalized_coords, non_normalized_coords)

        # Concatenate the one-hot class confidences and the converted box coordinates to form the decoded predictions tensor.
        y_pred = tf.concat(values=[class_ids, confidences, xmin, ymin, xmax, ymax], axis=-1)

        #####################################################################################
        # 2. Perform confidence thresholding, non-maximum suppression, and top-k filtering.
        #####################################################################################

        batch_size = tf.shape(y_pred)[0] # Output dtype: tf.int32
        n_boxes = tf.shape(y_pred)[1]
        n_classes = y_pred.shape[2] - 4
        class_indices = tf.range(1, n_classes)

        # Create a function that filters the predictions for the given batch item. Specifically, it performs:
        # - confidence thresholding
        # - non-maximum suppression (NMS)
        # - top-k filtering
        def filter_predictions(batch_item):
            # Keep only the non-background boxes (class ID 0 is background).
            positive_boxes = tf.not_equal(batch_item[...,0], 0.0)
            predictions = tf.boolean_mask(tensor=batch_item,
                                          mask=positive_boxes)

            def perform_confidence_thresholding():
                # Apply confidence thresholding.
                threshold_met = predictions[:,1] > self.tf_confidence_thresh
                return tf.boolean_mask(tensor=predictions,
                                       mask=threshold_met)
            def no_positive_boxes():
                # Single all-zero dummy row keeps downstream shapes valid.
                return tf.constant(value=0.0, shape=(1,6))

            # If there are any positive predictions, perform confidence thresholding.
            predictions_conf_thresh = tf.cond(tf.equal(tf.size(predictions), 0), no_positive_boxes, perform_confidence_thresholding)

            def perform_nms():
                scores = predictions_conf_thresh[...,1]

                # `tf.image.non_max_suppression()` needs the box coordinates in the format `(ymin, xmin, ymax, xmax)`.
                xmin = tf.expand_dims(predictions_conf_thresh[...,-4], axis=-1)
                ymin = tf.expand_dims(predictions_conf_thresh[...,-3], axis=-1)
                xmax = tf.expand_dims(predictions_conf_thresh[...,-2], axis=-1)
                ymax = tf.expand_dims(predictions_conf_thresh[...,-1], axis=-1)
                boxes = tf.concat(values=[ymin, xmin, ymax, xmax], axis=-1)

                maxima_indices = tf.image.non_max_suppression(boxes=boxes,
                                                              scores=scores,
                                                              max_output_size=self.tf_nms_max_output_size,
                                                              iou_threshold=self.iou_threshold,
                                                              name='non_maximum_suppresion')
                maxima = tf.gather(params=predictions_conf_thresh,
                                   indices=maxima_indices,
                                   axis=0)
                return maxima
            def no_confident_predictions():
                return tf.constant(value=0.0, shape=(1,6))

            # If any boxes made the threshold, perform NMS.
            predictions_nms = tf.cond(tf.equal(tf.size(predictions_conf_thresh), 0), no_confident_predictions, perform_nms)

            # Perform top-k filtering for this batch item or pad it in case there are
            # fewer than `self.top_k` boxes left at this point. Either way, produce a
            # tensor of length `self.top_k`. By the time we return the final results tensor
            # for the whole batch, all batch items must have the same number of predicted
            # boxes so that the tensor dimensions are homogenous. If fewer than `self.top_k`
            # predictions are left after the filtering process above, we pad the missing
            # predictions with zeros as dummy entries.
            def top_k():
                return tf.gather(params=predictions_nms,
                                 indices=tf.nn.top_k(predictions_nms[:, 1], k=self.tf_top_k, sorted=True).indices,
                                 axis=0)
            def pad_and_top_k():
                padded_predictions = tf.pad(tensor=predictions_nms,
                                            paddings=[[0, self.tf_top_k - tf.shape(predictions_nms)[0]], [0, 0]],
                                            mode='CONSTANT',
                                            constant_values=0.0)
                return tf.gather(params=padded_predictions,
                                 indices=tf.nn.top_k(padded_predictions[:, 1], k=self.tf_top_k, sorted=True).indices,
                                 axis=0)

            top_k_boxes = tf.cond(tf.greater_equal(tf.shape(predictions_nms)[0], self.tf_top_k), top_k, pad_and_top_k)

            return top_k_boxes

        # Iterate `filter_predictions()` over all batch items.
        output_tensor = tf.map_fn(fn=lambda x: filter_predictions(x),
                                  elems=y_pred,
                                  dtype=None,
                                  parallel_iterations=128,
                                  back_prop=False,
                                  swap_memory=False,
                                  infer_shape=True,
                                  name='loop_over_batch')

        return output_tensor

    def compute_output_shape(self, input_shape):
        batch_size, n_boxes, last_axis = input_shape
        # NOTE(review): self.tf_top_k is a tf constant, not a Python int —
        # the plain self.top_k would be the conventional value here; confirm.
        return (batch_size, self.tf_top_k, 6) # Last axis: (class_ID, confidence, 4 box coordinates)

    def get_config(self):
        # Serialise the constructor arguments so the layer can be re-created
        # from a saved model config.
        config = {
            'confidence_thresh': self.confidence_thresh,
            'iou_threshold': self.iou_threshold,
            'top_k': self.top_k,
            'nms_max_output_size': self.nms_max_output_size,
            'coords': self.coords,
            'normalize_coords': self.normalize_coords,
            'img_height': self.img_height,
            'img_width': self.img_width,
        }
        base_config = super(DecodeDetectionsFast, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| 55.070896
| 257
| 0.603632
|
acff50ff7bb7db54ed68dbd2673f27e4bbd54685
| 1,068
|
py
|
Python
|
src/tox_extra/hooks.py
|
tox-dev/tox-extra
|
9f65b4597754e8d926d81f1369bac312a078d996
|
[
"MIT"
] | 3
|
2021-03-25T16:17:58.000Z
|
2022-02-06T18:03:31.000Z
|
src/tox_extra/hooks.py
|
tox-dev/tox-extra
|
9f65b4597754e8d926d81f1369bac312a078d996
|
[
"MIT"
] | 4
|
2021-03-25T16:26:48.000Z
|
2022-02-17T08:14:27.000Z
|
src/tox_extra/hooks.py
|
tox-dev/tox-strict
|
9f65b4597754e8d926d81f1369bac312a078d996
|
[
"MIT"
] | 2
|
2021-12-14T09:43:46.000Z
|
2022-02-16T12:00:07.000Z
|
"""Tox hook implementations."""
from __future__ import print_function
import logging
import os
import git
try:
from tox import hookimpl
from tox.reporter import error
@hookimpl
def tox_runtest_post(venv):
"""Hook that runs after test commands."""
if os.path.isdir(f"{venv.envconfig.config.toxinidir}/.git"):
_environ = dict(os.environ)
try:
repo = git.Repo(os.getcwd())
if repo.is_dirty(untracked_files=True):
error(
"Git reported dirty status. "
"Git should never report dirty status at the end of "
"testing, regardless if status is passed, failed or aborted."
)
os.system("git status")
venv.status = "failed"
finally:
os.environ.clear()
os.environ.update(_environ)
except ImportError:
# tox4
logging.error("tox-extra disabled itself as it does not support tox4 yet.")
| 30.514286
| 85
| 0.552434
|
acff524f42cfca9018532a44c0ae953f22f9d578
| 9,735
|
py
|
Python
|
ingestion/functions/parsing/colombia/colombia.py
|
rpoltorak/list
|
7c6a71baa22c1d3ae4ed3babd98616592a4df95b
|
[
"MIT"
] | null | null | null |
ingestion/functions/parsing/colombia/colombia.py
|
rpoltorak/list
|
7c6a71baa22c1d3ae4ed3babd98616592a4df95b
|
[
"MIT"
] | null | null | null |
ingestion/functions/parsing/colombia/colombia.py
|
rpoltorak/list
|
7c6a71baa22c1d3ae4ed3babd98616592a4df95b
|
[
"MIT"
] | null | null | null |
import os
import sys
from datetime import datetime
import csv
# Layer code, like parsing_lib, is added to the path by AWS.
# To test locally (e.g. via pytest), we have to modify sys.path.
# pylint: disable=import-error
try:
import parsing_lib
except ImportError:
sys.path.append(
os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'common/python'))
import parsing_lib
def convert_date(raw_date: str, dataserver=True):
    """
    Convert raw date field into a value interpretable by the dataserver.
    Removing timestamp as always midnight.
    Set dataserver to False in order to return version appropriate for notes.
    """
    # Raw values look like "dd/mm/YYYY [H:MM:SS]"; keep only the date part.
    day_part = raw_date.split(' ')[0]
    parsed = datetime.strptime(day_part, "%d/%m/%Y")
    # Dataserver format carries a trailing 'Z'; notes format does not.
    fmt = "%m/%d/%YZ" if dataserver else "%m/%d/%Y"
    return parsed.strftime(fmt)
def convert_gender(raw_gender):
    """Map the dataset's single-letter sex codes to G.h gender labels.

    Returns "Male" for "M", "Female" for "F", None for anything else.
    """
    return {"M": "Male", "F": "Female"}.get(raw_gender)
def convert_location(raw_entry):
    """Build a geocoding query from municipality, department and country.

    Empty or missing fields are dropped; "Colombia" is always appended.
    """
    parts = []
    for field in ("Nombre municipio", "Nombre departamento"):
        value = raw_entry.get(field, "")
        if value != '':
            parts.append(value)
    parts.append("Colombia")
    return {"query": ", ".join(parts)}
def convert_demographics(entry):
    """Return a dict with age, gender and ethnicity for one raw row.

    'Edad' is an integer whose unit is given by 'Unidad de medida de edad':
    1 = years, 2 = months, 3 = days. Ages are normalised to years.
    Returns None only if no field could be populated.
    """
    ethnicity_names = {'1': 'Indigenous',
                       '2': 'ROM',
                       '3': 'Raizal',
                       '4': 'Palenquero',
                       '5': 'Black',
                       '6': 'Other'}
    # Divisor converting the reported age into years, keyed by unit code.
    unit_divisors = {'1': 1.0, '2': 12.0, '3': 365.0}
    demographics = {}
    if entry['Edad']:
        divisor = unit_divisors.get(str(entry['Unidad de medida de edad']))
        # Unknown unit codes leave the age out entirely.
        if divisor is not None:
            age_years = float(entry['Edad']) / divisor
            demographics["ageRange"] = {"start": age_years, "end": age_years}
    if entry['Sexo']:
        demographics["gender"] = convert_gender(entry['Sexo'])
    if entry['Pertenencia étnica']:
        ethnicity = ethnicity_names.get(str(entry['Pertenencia étnica']), "")
        if entry['Nombre del grupo étnico']:
            ethnicity += f", {entry['Nombre del grupo étnico']}"
    else:
        ethnicity = 'Unknown'
    demographics["ethnicity"] = ethnicity
    return demographics or None
def parse_cases(raw_data_file, source_id, source_url):
    """
    Parses G.h-format case data from raw API data.
    Caveats:
    - Assuming the date confirmed is the date of diagnosis (Fecha diagnostico) rather than
    Fecha de notificación (generally several days earlier). When date of diagnosis, using date reported online as proxy.
    - Case can have date of death, but 'Recuperado' column says recovered. This corresponds to patients who died but
    not from Covid-19.
    - Notes added include date reported online and date that SIVIGILA (national health alert system) was notiifed.
    Also whether case was imported, and how patient recovery was confirmed.
    - Tipo recuperación refers to how they decided the patient had recovered: either by 21 days elapsing since
    symptoms, or a negative PCR/antigen test
    - No dates for travel history, only distinction is between cases of type: 'Importado' vs. 'Relacionado'

    Yields one G.h case dict per CSV row.
    """
    # Map raw 'Estado' severity values to G.h symptom severity labels.
    symptom_map = {'leve': 'Mild',
                   'moderado': 'Moderate',
                   'grave': 'Serious'}
    with open(raw_data_file, "r") as f:
        reader = csv.DictReader(f)
        for entry in reader:
            notes = []
            case = {
                "caseReference": {
                    "sourceId": source_id,
                    "sourceEntryId": entry["ID de caso"],
                    "sourceUrl": source_url
                },
                "location": convert_location(entry),
                "demographics": convert_demographics(entry)
            }
            # Confirmation event: prefer the diagnosis date, fall back to the
            # date the case was reported online (with an explanatory note).
            if entry["Fecha de diagnóstico"]:
                case["events"] = [
                    {
                        "name": "confirmed",
                        "dateRange":
                        {
                            "start": convert_date(entry["Fecha de diagnóstico"]),
                            "end": convert_date(entry["Fecha de diagnóstico"])
                        }
                    },
                ]
            else:
                case["events"] = [
                    {
                        "name": "confirmed",
                        "dateRange":
                        {
                            "start": convert_date(entry["fecha reporte web"]),
                            "end": convert_date(entry["fecha reporte web"])
                        }
                    },
                ]
                notes.append(
                    "No Date of Diagnosis provided, so using Date Reported Online (fecha reporte web) as a proxy. This is normally approx. 1 week later.")
            # If patient was symptomatic, mark date of onsetSymptoms, otherwise
            # asymptomatic
            # maybe change to elif clause and check if can parse field as date
            if entry["Fecha de inicio de síntomas"]:
                case["symptoms"] = {
                    "status": "Symptomatic",
                }
                case["events"].append({
                    "name": "onsetSymptoms",
                    "dateRange": {
                        "start": convert_date(entry['Fecha de inicio de síntomas']),
                        "end": convert_date(entry['Fecha de inicio de síntomas']),
                    }
                })
            else:
                case["symptoms"] = {
                    "status": "Asymptomatic",
                }
            # Include severity of symptoms
            if entry["Estado"].lower() in symptom_map.keys():
                case["symptoms"]["values"] = [
                    symptom_map.get(entry['Estado'].lower())]
            # Patient Outcome
            # If patient died, mark date
            if entry["Fecha de muerte"]:
                case["events"].append({
                    "name": "outcome",
                    "value": "Death",
                    "dateRange": {
                        "start": convert_date(entry['Fecha de muerte']),
                        "end": convert_date(entry['Fecha de muerte']),
                    }
                })
                # A death date with a non-'fallecido' status means the death
                # was not attributed to Covid-19 (see docstring caveat).
                if entry["Recuperado"].lower() != "fallecido":
                    notes.append("Died from something other than Covid-19.")
            elif entry["Recuperado"].lower() == "recuperado":
                case["events"].append({
                    "name": "outcome",
                    "value": "Recovered",
                    "dateRange": {
                        "start": convert_date(entry['Fecha de recuperación']),
                        "end": convert_date(entry['Fecha de recuperación']),
                    }
                })
            elif entry['Recuperado'].lower() == 'activo':
                notes.append('Case still active')
            # Hospital / ICU / home isolation from the case-location field.
            if entry["Ubicación del caso"].lower() == "hospital":
                case["events"].append({
                    "name": "hospitalAdmission",
                    "value": "Yes"
                })
            if entry["Ubicación del caso"].lower() == 'hospital uci':
                case["events"].append({
                    "name": "icuAdmission",
                    "value": "Yes"
                })
            if entry["Ubicación del caso"].lower() == 'casa':
                notes.append("Patient self-isolated and recovered at home.")
            # Add notes for each case
            # Travel History - we currently do not have any travel dates, so
            # unknown whether in last 30 days
            if entry["Tipo de contagio"].lower() == "importado":
                notes.append(
                    f"Case is reported as importing the disease into Colombia, and country of origin is {entry['Nombre del país']}.")
            elif entry["Tipo de contagio"].lower() == 'relacionado':
                notes.append("Case was transmitted within Colombia.")
            elif entry["Tipo de contagio"].lower() == 'en estudio':
                notes.append(
                    "Case transmission under investigation (currently unknown).")
            if entry['fecha reporte web']:
                notes.append(
                    f"Date reported online was {convert_date(entry['fecha reporte web'],dataserver=False)}.")
            if entry['Fecha de notificación']:
                notes.append(
                    f"Date reported to SIVIGILA was {convert_date(entry['Fecha de notificación'],dataserver=False)}.")
            if entry['Tipo de recuperación'] == 'PCR':
                notes.append(
                    f"Patient recovery was confirmed by a negative PCR test.")
            elif entry['Tipo de recuperación'] == 'Tiempo':
                notes.append(
                    f"Patient recovery was confirmed by 21 days elapsing with no symptoms.")
            if notes:
                case["notes"] = " \n".join(notes)
            yield case
def lambda_handler(event, context):
    """AWS Lambda entry point: delegate ingestion to the shared parsing_lib runner."""
    return parsing_lib.run_lambda(event, context, parse_cases)
| 37.298851
| 154
| 0.511043
|
acff543d3a14bb7b449cd19a06c5ad74ff72be3f
| 5,333
|
py
|
Python
|
mopidy/local/commands.py
|
rzr/mopidy
|
f6556ffafce34aebbc43ca266f69ac0068edc31d
|
[
"Apache-2.0"
] | null | null | null |
mopidy/local/commands.py
|
rzr/mopidy
|
f6556ffafce34aebbc43ca266f69ac0068edc31d
|
[
"Apache-2.0"
] | null | null | null |
mopidy/local/commands.py
|
rzr/mopidy
|
f6556ffafce34aebbc43ca266f69ac0068edc31d
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function, unicode_literals
import logging
import os
import time
from mopidy import commands, exceptions
from mopidy.audio import scan
from mopidy.utils import path
from . import translator
logger = logging.getLogger(__name__)
def _get_library(args, config):
    # Resolve the configured local-library backend from the extension
    # registry and instantiate it with the full config.
    libraries = dict((l.name, l) for l in args.registry['local:library'])
    library_name = config['local']['library']
    if library_name not in libraries:
        logger.warning('Local library %s not found', library_name)
        # NOTE(review): returns the int 1 (exit-code idiom) rather than a
        # library instance or None; callers that use the result directly
        # will fail with AttributeError — confirm intended behaviour.
        return 1
    logger.debug('Using %s as the local library', library_name)
    return libraries[library_name](config)
class LocalCommand(commands.Command):
    """Top-level `local` command; dispatches to the `scan` and `clear` subcommands."""

    def __init__(self):
        super(LocalCommand, self).__init__()
        self.add_child('scan', ScanCommand())
        self.add_child('clear', ClearCommand())
class ClearCommand(commands.Command):
    help = 'Clear local media files from the local library.'

    def run(self, args, config):
        """Prompt for confirmation, then wipe the local library.

        Returns a process exit code: 0 on success or user abort, 1 on failure.
        """
        library = _get_library(args, config)
        prompt = '\nAre you sure you want to clear the library? [y/N] '
        # raw_input: this module targets Python 2 (see __future__ imports).
        if raw_input(prompt).lower() != 'y':
            print('Clearing library aborted.')
            return 0
        if library.clear():
            print('Library successfully cleared.')
            return 0
        print('Unable to clear library.')
        return 1
class ScanCommand(commands.Command):
    help = 'Scan local media files and populate the local library.'

    def __init__(self):
        super(ScanCommand, self).__init__()
        # NOTE(review): 'Maxmimum' typo in the user-visible help text below.
        self.add_argument('--limit',
            action='store', type=int, dest='limit', default=None,
            help='Maxmimum number of tracks to scan')
def run(self, args, config):
media_dir = config['local']['media_dir']
scan_timeout = config['local']['scan_timeout']
flush_threshold = config['local']['scan_flush_threshold']
excluded_file_extensions = config['local']['excluded_file_extensions']
excluded_file_extensions = tuple(
bytes(file_ext.lower()) for file_ext in excluded_file_extensions)
library = _get_library(args, config)
uris_in_library = set()
uris_to_update = set()
uris_to_remove = set()
file_mtimes = path.find_mtimes(media_dir)
logger.info('Found %d files in media_dir.', len(file_mtimes))
num_tracks = library.load()
logger.info('Checking %d tracks from library.', num_tracks)
for track in library.begin():
abspath = translator.local_track_uri_to_path(track.uri, media_dir)
mtime = file_mtimes.pop(abspath, None)
if mtime is None:
logger.debug('Missing file %s', track.uri)
uris_to_remove.add(track.uri)
elif mtime > track.last_modified:
uris_in_library.add(track.uri)
logger.info('Removing %d missing tracks.', len(uris_to_remove))
for uri in uris_to_remove:
library.remove(uri)
for abspath in file_mtimes:
relpath = os.path.relpath(abspath, media_dir)
uri = translator.path_to_local_track_uri(relpath)
if relpath.lower().endswith(excluded_file_extensions):
logger.debug('Skipped %s: File extension excluded.', uri)
continue
uris_to_update.add(uri)
logger.info(
'Found %d tracks which need to be updated.', len(uris_to_update))
logger.info('Scanning...')
uris_to_update = sorted(uris_to_update, key=lambda v: v.lower())
uris_to_update = uris_to_update[:args.limit]
scanner = scan.Scanner(scan_timeout)
progress = _Progress(flush_threshold, len(uris_to_update))
for uri in uris_to_update:
try:
relpath = translator.local_track_uri_to_path(uri, media_dir)
file_uri = path.path_to_uri(os.path.join(media_dir, relpath))
data = scanner.scan(file_uri)
track = scan.audio_data_to_track(data).copy(uri=uri)
library.add(track)
logger.debug('Added %s', track.uri)
except exceptions.ScannerError as error:
logger.warning('Failed %s: %s', uri, error)
if progress.increment():
progress.log()
if library.flush():
logger.debug('Progress flushed.')
progress.log()
library.close()
logger.info('Done scanning.')
return 0
class _Progress(object):
def __init__(self, batch_size, total):
self.count = 0
self.batch_size = batch_size
self.total = total
self.start = time.time()
def increment(self):
self.count += 1
return self.batch_size and self.count % self.batch_size == 0
def log(self):
duration = time.time() - self.start
if self.count >= self.total or not self.count:
logger.info('Scanned %d of %d files in %ds.',
self.count, self.total, duration)
else:
remainder = duration / self.count * (self.total - self.count)
logger.info('Scanned %d of %d files in %ds, ~%ds left.',
self.count, self.total, duration, remainder)
| 33.753165
| 79
| 0.612413
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.